Diffstat (limited to 'src/3rdparty/v8/src/arm')
-rw-r--r--  src/3rdparty/v8/src/arm/assembler-arm-inl.h  549
-rw-r--r--  src/3rdparty/v8/src/arm/assembler-arm.cc  3052
-rw-r--r--  src/3rdparty/v8/src/arm/assembler-arm.h  1518
-rw-r--r--  src/3rdparty/v8/src/arm/builtins-arm.cc  1901
-rw-r--r--  src/3rdparty/v8/src/arm/code-stubs-arm.cc  8166
-rw-r--r--  src/3rdparty/v8/src/arm/code-stubs-arm.h  800
-rw-r--r--  src/3rdparty/v8/src/arm/codegen-arm.cc  708
-rw-r--r--  src/3rdparty/v8/src/arm/codegen-arm.h  115
-rw-r--r--  src/3rdparty/v8/src/arm/constants-arm.cc  154
-rw-r--r--  src/3rdparty/v8/src/arm/constants-arm.h  789
-rw-r--r--  src/3rdparty/v8/src/arm/cpu-arm.cc  134
-rw-r--r--  src/3rdparty/v8/src/arm/debug-arm.cc  345
-rw-r--r--  src/3rdparty/v8/src/arm/deoptimizer-arm.cc  1106
-rw-r--r--  src/3rdparty/v8/src/arm/disasm-arm.cc  1572
-rw-r--r--  src/3rdparty/v8/src/arm/frames-arm.cc  45
-rw-r--r--  src/3rdparty/v8/src/arm/frames-arm.h  172
-rw-r--r--  src/3rdparty/v8/src/arm/full-codegen-arm.cc  4622
-rw-r--r--  src/3rdparty/v8/src/arm/ic-arm.cc  1685
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-arm.cc  2515
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-arm.h  2742
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-codegen-arm.cc  6408
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-codegen-arm.h  513
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc  318
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.h  83
-rw-r--r--  src/3rdparty/v8/src/arm/macro-assembler-arm.cc  4012
-rw-r--r--  src/3rdparty/v8/src/arm/macro-assembler-arm.h  1439
-rw-r--r--  src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc  1429
-rw-r--r--  src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h  259
-rw-r--r--  src/3rdparty/v8/src/arm/simulator-arm.cc  3475
-rw-r--r--  src/3rdparty/v8/src/arm/simulator-arm.h  468
-rw-r--r--  src/3rdparty/v8/src/arm/stub-cache-arm.cc  4091
31 files changed, 0 insertions, 55185 deletions
diff --git a/src/3rdparty/v8/src/arm/assembler-arm-inl.h b/src/3rdparty/v8/src/arm/assembler-arm-inl.h
deleted file mode 100644
index af29bb8..0000000
--- a/src/3rdparty/v8/src/arm/assembler-arm-inl.h
+++ /dev/null
@@ -1,549 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license has been modified
-// significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
-#define V8_ARM_ASSEMBLER_ARM_INL_H_
-
-#include "arm/assembler-arm.h"
-
-#include "cpu.h"
-#include "debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-int Register::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(VFP2)) {
- return kMaxNumAllocatableRegisters;
- } else {
- return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
- }
-}
-
-
-int DwVfpRegister::NumRegisters() {
- if (CpuFeatures::IsSupported(VFP2)) {
- return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
- } else {
- return 1;
- }
-}
-
-
-int DwVfpRegister::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(VFP2)) {
- return NumRegisters() - kNumReservedRegisters;
- } else {
- return 1;
- }
-}
-
-
-int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
- ASSERT(!reg.is(kDoubleRegZero));
- ASSERT(!reg.is(kScratchDoubleReg));
- if (reg.code() > kDoubleRegZero.code()) {
- return reg.code() - kNumReservedRegisters;
- }
- return reg.code();
-}
-
-
-DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
- kNumReservedRegisters - 1);
- if (index >= kDoubleRegZero.code()) {
- return from_code(index + kNumReservedRegisters);
- }
- return from_code(index);
-}
-
-
-void RelocInfo::apply(intptr_t delta) {
- if (RelocInfo::IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p += delta; // relocate entry
- }
- // We do not use pc relative addressing on ARM, so there is
- // nothing else to do.
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
-}
-
-
-int RelocInfo::target_address_size() {
- return kPointerSize;
-}
-
-
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_pointer_at(pc_)));
-}
-
-
-Object** RelocInfo::target_object_address() {
- // Provide a "natural pointer" to the embedded object,
- // which can be de-referenced during heap iteration.
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- reconstructed_obj_ptr_ =
- reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
- return &reconstructed_obj_ptr_;
-}
-
-
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
- if (mode == UPDATE_WRITE_BARRIER &&
- host() != NULL &&
- target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
- }
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == EXTERNAL_REFERENCE);
- reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
- return &reconstructed_adr_ptr_;
-}
-
-
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
-}
-
-
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
-}
-
-
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
- Memory::Address_at(pc_) = address;
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
- }
-}
-
-
-static const int kNoCodeAgeSequenceLength = 3;
-
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- return Code::GetCodeFromTargetAddress(
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)) =
- stub->instruction_start();
-}
-
-
-Address RelocInfo::call_address() {
- // The 2 instructions offset assumes patched debug break slot or return
- // sequence.
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
- if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
-#ifdef USE_BLX
- // A patched return sequence is:
- // ldr ip, [pc, #0]
- // blx ip
- return ((current_instr & kLdrPCMask) == kLdrPCPattern)
- && ((next_instr & kBlxRegMask) == kBlxRegPattern);
-#else
- // A patched return sequence is:
- // mov lr, pc
- // ldr pc, [pc, #-4]
- return (current_instr == kMovLrPc)
- && ((next_instr & kLdrPCMask) == kLdrPCPattern);
-#endif
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
- visitor->VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- visitor->VisitRuntimeEntry(this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(heap, this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
- rm_ = no_reg;
- imm32_ = immediate;
- rmode_ = rmode;
-}
-
-
-Operand::Operand(const ExternalReference& f) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(f.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Operand::Operand(Smi* value) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-Operand::Operand(Register rm) {
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = LSL;
- shift_imm_ = 0;
-}
-
-
-bool Operand::is_reg() const {
- return rm_.is_valid() &&
- rs_.is(no_reg) &&
- shift_op_ == LSL &&
- shift_imm_ == 0;
-}
-
-
-void Assembler::CheckBuffer() {
- if (buffer_space() <= kGap) {
- GrowBuffer();
- }
- if (pc_offset() >= next_buffer_check_) {
- CheckConstPool(false, true);
- }
-}
-
-
-void Assembler::emit(Instr x) {
- CheckBuffer();
- *reinterpret_cast<Instr*>(pc_) = x;
- pc_ += kInstrSize;
-}
-
-
-Address Assembler::target_pointer_address_at(Address pc) {
- Address target_pc = pc;
- Instr instr = Memory::int32_at(target_pc);
- // If we have a bx instruction, the instruction before the bx is
- // what we need to patch.
- static const int32_t kBxInstMask = 0x0ffffff0;
- static const int32_t kBxInstPattern = 0x012fff10;
- if ((instr & kBxInstMask) == kBxInstPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-
-#ifdef USE_BLX
- // If we have a blx instruction, the instruction before it is
- // what needs to be patched.
- if ((instr & kBlxRegMask) == kBlxRegPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-#endif
-
- ASSERT(IsLdrPcImmediateOffset(instr));
- int offset = instr & 0xfff; // offset_12 is unsigned
- if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
- // Verify that the constant pool comes after the instruction referencing it.
- ASSERT(offset >= -4);
- return target_pc + offset + 8;
-}
-
-
-Address Assembler::target_pointer_at(Address pc) {
- if (IsMovW(Memory::int32_at(pc))) {
- ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- Instruction* instr = Instruction::At(pc);
- Instruction* next_instr = Instruction::At(pc + kInstrSize);
- return reinterpret_cast<Address>(
- (next_instr->ImmedMovwMovtValue() << 16) |
- instr->ImmedMovwMovtValue());
- }
- return Memory::Address_at(target_pointer_address_at(pc));
-}
-
-
-Address Assembler::target_address_from_return_address(Address pc) {
- // Returns the address of the call target from the return address that will
- // be returned to after a call.
-#ifdef USE_BLX
- // Call sequence on V7 or later is :
- // movw ip, #... @ call address low 16
- // movt ip, #... @ call address high 16
- // blx ip
- // @ return address
- // Or pre-V7 or cases that need frequent patching:
- // ldr ip, [pc, #...] @ call address
- // blx ip
- // @ return address
- Address candidate = pc - 2 * Assembler::kInstrSize;
- Instr candidate_instr(Memory::int32_at(candidate));
- if (IsLdrPcImmediateOffset(candidate_instr)) {
- return candidate;
- }
- candidate = pc - 3 * Assembler::kInstrSize;
- ASSERT(IsMovW(Memory::int32_at(candidate)) &&
- IsMovT(Memory::int32_at(candidate + kInstrSize)));
- return candidate;
-#else
- // Call sequence is:
- // mov lr, pc
- // ldr pc, [pc, #...] @ call address
- // @ return address
- return pc - kInstrSize;
-#endif
-}
-
-
-Address Assembler::return_address_from_call_start(Address pc) {
-#ifdef USE_BLX
- if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
- return pc + kInstrSize * 2;
- } else {
- ASSERT(IsMovW(Memory::int32_at(pc)));
- ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- return pc + kInstrSize * 3;
- }
-#else
- return pc + kInstrSize;
-#endif
-}
-
-
-void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Address target) {
- Memory::Address_at(constant_pool_entry) = target;
-}
-
-
-void Assembler::set_external_target_at(Address constant_pool_entry,
- Address target) {
- Memory::Address_at(constant_pool_entry) = target;
-}
-
-
-static Instr EncodeMovwImmediate(uint32_t immediate) {
- ASSERT(immediate < 0x10000);
- return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
-}
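
EncodeMovwImmediate() above packs a 16-bit immediate into the split imm4:imm12 fields that movw/movt use. A tiny self-contained demo of that bit layout (illustrative only, not part of the deleted file):

    #include <cstdio>

    int main() {
      unsigned immediate = 0xbeef;
      unsigned encoded = ((immediate & 0xf000) << 4)  // imm4  -> bits 16..19
                       | (immediate & 0xfff);         // imm12 -> bits 0..11
      // Prints: imm4=0xb imm12=0xeef
      std::printf("imm4=0x%x imm12=0x%x\n",
                  (encoded >> 16) & 0xf, encoded & 0xfff);
      return 0;
    }
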
-
-
-void Assembler::set_target_pointer_at(Address pc, Address target) {
- if (IsMovW(Memory::int32_at(pc))) {
- ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
- uint32_t immediate = reinterpret_cast<uint32_t>(target);
- uint32_t intermediate = instr_ptr[0];
- intermediate &= ~EncodeMovwImmediate(0xFFFF);
- intermediate |= EncodeMovwImmediate(immediate & 0xFFFF);
- instr_ptr[0] = intermediate;
- intermediate = instr_ptr[1];
- intermediate &= ~EncodeMovwImmediate(0xFFFF);
- intermediate |= EncodeMovwImmediate(immediate >> 16);
- instr_ptr[1] = intermediate;
- ASSERT(IsMovW(Memory::int32_at(pc)));
- ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- CPU::FlushICache(pc, 2 * kInstrSize);
- } else {
- ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
- Memory::Address_at(target_pointer_address_at(pc)) = target;
- // Intuitively, we would think it is necessary to always flush the
- // instruction cache after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
- // However, on ARM, no instruction is actually patched in the case
- // of embedded constants of the form:
- // ldr ip, [pc, #...]
- // since the instruction accessing this address in the constant pool remains
- // unchanged.
- }
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- return target_pointer_at(pc);
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- set_target_pointer_at(pc, target);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
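
Before the next file: set_target_pointer_at() above patches a movw/movt pair in place by rewriting only the immediate fields of the two instruction words. A hedged sketch of that patching step, using hypothetical names rather than the V8 API:

    #include <cstdint>

    static uint32_t EncodeMovw(uint32_t imm16) {  // same field layout as above
      return ((imm16 & 0xf000) << 4) | (imm16 & 0xfff);
    }

    // instr[0] is the movw, instr[1] the movt; target is the new address.
    void PatchMovwMovt(uint32_t instr[2], uint32_t target) {
      const uint32_t kImmMask = EncodeMovw(0xffff);
      instr[0] = (instr[0] & ~kImmMask) | EncodeMovw(target & 0xffff);  // low half
      instr[1] = (instr[1] & ~kImmMask) | EncodeMovw(target >> 16);     // high half
      // A real patcher must also flush the instruction cache for both words,
      // as the original does with CPU::FlushICache(pc, 2 * kInstrSize).
    }
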
diff --git a/src/3rdparty/v8/src/arm/assembler-arm.cc b/src/3rdparty/v8/src/arm/assembler-arm.cc
deleted file mode 100644
index a8c32d9..0000000
--- a/src/3rdparty/v8/src/arm/assembler-arm.cc
+++ /dev/null
@@ -1,3052 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "arm/assembler-arm-inl.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
-
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-// Get the CPU features enabled by the build. For cross compilation the
-// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
-// can be defined to enable ARMv7 and VFPv3 instructions when building the
-// snapshot.
-static unsigned CpuFeaturesImpliedByCompiler() {
- unsigned answer = 0;
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
- answer |= 1u << ARMv7;
-#endif // CAN_USE_ARMV7_INSTRUCTIONS
-#ifdef CAN_USE_VFP3_INSTRUCTIONS
- answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7;
-#endif // CAN_USE_VFP3_INSTRUCTIONS
-#ifdef CAN_USE_VFP2_INSTRUCTIONS
- answer |= 1u << VFP2;
-#endif // CAN_USE_VFP2_INSTRUCTIONS
-#ifdef CAN_USE_VFP32DREGS
- answer |= 1u << VFP32DREGS;
-#endif // CAN_USE_VFP32DREGS
-
-#ifdef __arm__
- // If the compiler is allowed to use VFP then we can use VFP too in our code
- // generation even when generating snapshots. ARMv7 and hardware floating
- // point support implies VFPv3, see ARM DDI 0406B, page A1-6.
-#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
- && !defined(__SOFTFP__)
- answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
-#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
- // && !defined(__SOFTFP__)
-#endif // __arm__
- if (answer & (1u << ARMv7)) {
- answer |= 1u << UNALIGNED_ACCESSES;
- }
-
- return answer;
-}
-
-
-const char* DwVfpRegister::AllocationIndexToString(int index) {
- if (CpuFeatures::IsSupported(VFP2)) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
- kNumReservedRegisters - 1);
- if (index >= kDoubleRegZero.code())
- index += kNumReservedRegisters;
-
- return VFPRegisters::Name(index, true);
- } else {
- ASSERT(index == 0);
- return "sfpd0";
- }
-}
-
-
-void CpuFeatures::Probe() {
- unsigned standard_features = static_cast<unsigned>(
- OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
- ASSERT(supported_ == 0 || supported_ == standard_features);
-#ifdef DEBUG
- initialized_ = true;
-#endif
-
- // Get the features implied by the OS and the compiler settings. This is the
- // minimal set of features which is also allowed for generated code in the
- // snapshot.
- supported_ |= standard_features;
-
- if (Serializer::enabled()) {
- // No probing for features if we might serialize (generate snapshot).
- return;
- }
-
-#ifndef __arm__
- // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
- // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
- if (FLAG_enable_vfp3) {
- supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
- }
- // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
- if (FLAG_enable_armv7) {
- supported_ |= 1u << ARMv7;
- }
-
- if (FLAG_enable_sudiv) {
- supported_ |= 1u << SUDIV;
- }
-
- if (FLAG_enable_movw_movt) {
- supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
- }
-
- if (FLAG_enable_32dregs) {
- supported_ |= 1u << VFP32DREGS;
- }
-
-#else // __arm__
- // Probe for additional features not already known to be available.
- if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
- // This implementation also sets the VFP flags if runtime
- // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
- // 0406B, page A1-6.
- found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
- } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
- found_by_runtime_probing_ |= 1u << VFP2;
- }
-
- if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
- found_by_runtime_probing_ |= 1u << ARMv7;
- }
-
- if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) {
- found_by_runtime_probing_ |= 1u << SUDIV;
- }
-
- if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) {
- found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES;
- }
-
- if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
- OS::ArmCpuHasFeature(ARMv7)) {
- found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
- }
-
- if (!IsSupported(VFP32DREGS) && OS::ArmCpuHasFeature(VFP32DREGS)) {
- found_by_runtime_probing_ |= 1u << VFP32DREGS;
- }
-
- supported_ |= found_by_runtime_probing_;
-#endif
-
- // Assert that VFP3 implies VFP2 and ARMv7.
- ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-const int RelocInfo::kApplyMask = 0;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on ARM means that it is a movw/movt instruction. We don't
- // generate those yet.
- return false;
-}
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Patch the code at the current address with a call to the target.
- UNIMPLEMENTED();
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand and MemOperand
-// See assembler-arm-inl.h for inlined constructors
-
-Operand::Operand(Handle<Object> handle) {
- rm_ = no_reg;
- // Verify all Objects referred to by code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
- if (obj->IsHeapObject()) {
- imm32_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE32;
- }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
- ASSERT(is_uint5(shift_imm));
- ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = shift_op;
- shift_imm_ = shift_imm & 31;
- if (shift_op == RRX) {
- // encoded as ROR with shift_imm == 0
- ASSERT(shift_imm == 0);
- shift_op_ = ROR;
- shift_imm_ = 0;
- }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
- ASSERT(shift_op != RRX);
- rm_ = rm;
- rs_ = no_reg;
- shift_op_ = shift_op;
- rs_ = rs;
-}
-
-
-MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
- rn_ = rn;
- rm_ = no_reg;
- offset_ = offset;
- am_ = am;
-}
-
-MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
- rn_ = rn;
- rm_ = rm;
- shift_op_ = LSL;
- shift_imm_ = 0;
- am_ = am;
-}
-
-
-MemOperand::MemOperand(Register rn, Register rm,
- ShiftOp shift_op, int shift_imm, AddrMode am) {
- ASSERT(is_uint5(shift_imm));
- rn_ = rn;
- rm_ = rm;
- shift_op_ = shift_op;
- shift_imm_ = shift_imm & 31;
- am_ = am;
-}
-
-
-// -----------------------------------------------------------------------------
-// Specific instructions, constants, and masks.
-
-// add(sp, sp, 4) instruction (aka Pop())
-const Instr kPopInstruction =
- al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
- kRegister_sp_Code * B12;
-// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
-// register r is not encoded.
-const Instr kPushRegPattern =
- al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
-// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
-// register r is not encoded.
-const Instr kPopRegPattern =
- al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
-// mov lr, pc
-const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
-// ldr rd, [pc, #offset]
-const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
-// vldr dd, [pc, #offset]
-const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
-const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
-// blxcc rm
-const Instr kBlxRegMask =
- 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
-const Instr kBlxRegPattern =
- B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
-const Instr kBlxIp = al | kBlxRegPattern | ip.code();
-const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
-const Instr kMovMvnPattern = 0xd * B21;
-const Instr kMovMvnFlip = B22;
-const Instr kMovLeaveCCMask = 0xdff * B16;
-const Instr kMovLeaveCCPattern = 0x1a0 * B16;
-const Instr kMovwMask = 0xff * B20;
-const Instr kMovwPattern = 0x30 * B20;
-const Instr kMovwLeaveCCFlip = 0x5 * B21;
-const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
-const Instr kCmpCmnPattern = 0x15 * B20;
-const Instr kCmpCmnFlip = B21;
-const Instr kAddSubFlip = 0x6 * B21;
-const Instr kAndBicFlip = 0xe * B21;
-
-// A mask for the Rd register for push, pop, ldr, str instructions.
-const Instr kLdrRegFpOffsetPattern =
- al | B26 | L | Offset | kRegister_fp_Code * B16;
-const Instr kStrRegFpOffsetPattern =
- al | B26 | Offset | kRegister_fp_Code * B16;
-const Instr kLdrRegFpNegOffsetPattern =
- al | B26 | L | NegOffset | kRegister_fp_Code * B16;
-const Instr kStrRegFpNegOffsetPattern =
- al | B26 | NegOffset | kRegister_fp_Code * B16;
-const Instr kLdrStrInstrTypeMask = 0xffff0000;
-const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
-const Instr kLdrStrOffsetMask = 0x00000fff;
-
-
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
- recorded_ast_id_(TypeFeedbackId::None()),
- positions_recorder_(this) {
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
- num_pending_reloc_info_ = 0;
- num_pending_64_bit_reloc_info_ = 0;
- next_buffer_check_ = 0;
- const_pool_blocked_nesting_ = 0;
- no_const_pool_before_ = 0;
- first_const_pool_use_ = -1;
- last_bound_pos_ = 0;
- ClearRecordedAstId();
-}
-
-
-Assembler::~Assembler() {
- ASSERT(const_pool_blocked_nesting_ == 0);
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Emit constant pool if necessary.
- CheckConstPool(true, false);
- ASSERT(num_pending_reloc_info_ == 0);
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
-
- // Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(m >= 4 && IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
- }
-}
-
-
-void Assembler::CodeTargetAlign() {
- // Preferred alignment of jump targets on some ARM chips.
- Align(8);
-}
-
-
-Condition Assembler::GetCondition(Instr instr) {
- return Instruction::ConditionField(instr);
-}
-
-
-bool Assembler::IsBranch(Instr instr) {
- return (instr & (B27 | B25)) == (B27 | B25);
-}
-
-
-int Assembler::GetBranchOffset(Instr instr) {
- ASSERT(IsBranch(instr));
- // Take the jump offset in the lower 24 bits, sign extend it and multiply it
- // with 4 to get the offset in bytes.
- return ((instr & kImm24Mask) << 8) >> 6;
-}
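
GetBranchOffset() above sign-extends the 24-bit branch field and scales it to bytes in one expression: shifting the field to the top of a signed 32-bit word and arithmetic-shifting back down by 6 extends the sign and multiplies by 4. A small illustrative check (hypothetical helper; like the original, it relies on the usual two's-complement behaviour of signed shifts):

    #include <cassert>
    #include <cstdint>

    int32_t BranchOffsetFromImm24(int32_t instr) {
      const int32_t kImm24Mask = (1 << 24) - 1;
      return ((instr & kImm24Mask) << 8) >> 6;  // sign-extend, then * 4
    }

    int main() {
      assert(BranchOffsetFromImm24(0x000001) == 4);   // +1 instruction word
      assert(BranchOffsetFromImm24(0xffffff) == -4);  // -1 instruction word
      return 0;
    }
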
-
-
-bool Assembler::IsLdrRegisterImmediate(Instr instr) {
- return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
-}
-
-
-bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
- return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
-}
-
-
-int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
- ASSERT(IsLdrRegisterImmediate(instr));
- bool positive = (instr & B23) == B23;
- int offset = instr & kOff12Mask; // Zero extended offset.
- return positive ? offset : -offset;
-}
-
-
-int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
- ASSERT(IsVldrDRegisterImmediate(instr));
- bool positive = (instr & B23) == B23;
- int offset = instr & kOff8Mask; // Zero extended offset.
- offset <<= 2;
- return positive ? offset : -offset;
-}
-
-
-Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsLdrRegisterImmediate(instr));
- bool positive = offset >= 0;
- if (!positive) offset = -offset;
- ASSERT(is_uint12(offset));
- // Set bit indicating whether the offset should be added.
- instr = (instr & ~B23) | (positive ? B23 : 0);
- // Set the actual offset.
- return (instr & ~kOff12Mask) | offset;
-}
-
-
-Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsVldrDRegisterImmediate(instr));
- ASSERT((offset & ~3) == offset); // Must be 64-bit aligned.
- bool positive = offset >= 0;
- if (!positive) offset = -offset;
- ASSERT(is_uint10(offset));
- // Set bit indicating whether the offset should be added.
- instr = (instr & ~B23) | (positive ? B23 : 0);
- // Set the actual offset. Its bottom 2 bits are zero.
- return (instr & ~kOff8Mask) | (offset >> 2);
-}
-
-
-bool Assembler::IsStrRegisterImmediate(Instr instr) {
- return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
-}
-
-
-Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsStrRegisterImmediate(instr));
- bool positive = offset >= 0;
- if (!positive) offset = -offset;
- ASSERT(is_uint12(offset));
- // Set bit indicating whether the offset should be added.
- instr = (instr & ~B23) | (positive ? B23 : 0);
- // Set the actual offset.
- return (instr & ~kOff12Mask) | offset;
-}
-
-
-bool Assembler::IsAddRegisterImmediate(Instr instr) {
- return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
-}
-
-
-Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
- ASSERT(IsAddRegisterImmediate(instr));
- ASSERT(offset >= 0);
- ASSERT(is_uint12(offset));
- // Set the offset.
- return (instr & ~kOff12Mask) | offset;
-}
-
-
-Register Assembler::GetRd(Instr instr) {
- Register reg;
- reg.code_ = Instruction::RdValue(instr);
- return reg;
-}
-
-
-Register Assembler::GetRn(Instr instr) {
- Register reg;
- reg.code_ = Instruction::RnValue(instr);
- return reg;
-}
-
-
-Register Assembler::GetRm(Instr instr) {
- Register reg;
- reg.code_ = Instruction::RmValue(instr);
- return reg;
-}
-
-
-bool Assembler::IsPush(Instr instr) {
- return ((instr & ~kRdMask) == kPushRegPattern);
-}
-
-
-bool Assembler::IsPop(Instr instr) {
- return ((instr & ~kRdMask) == kPopRegPattern);
-}
-
-
-bool Assembler::IsStrRegFpOffset(Instr instr) {
- return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
-}
-
-
-bool Assembler::IsLdrRegFpOffset(Instr instr) {
- return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
-}
-
-
-bool Assembler::IsStrRegFpNegOffset(Instr instr) {
- return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
-}
-
-
-bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
- return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
-}
-
-
-bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
- // Check the instruction is indeed a
- // ldr<cond> <Rd>, [pc +/- offset_12].
- return (instr & kLdrPCMask) == kLdrPCPattern;
-}
-
-
-bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
- // Check the instruction is indeed a
- // vldr<cond> <Dd>, [pc +/- offset_10].
- return (instr & kVldrDPCMask) == kVldrDPCPattern;
-}
-
-
-bool Assembler::IsTstImmediate(Instr instr) {
- return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
- (I | TST | S);
-}
-
-
-bool Assembler::IsCmpRegister(Instr instr) {
- return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
- (CMP | S);
-}
-
-
-bool Assembler::IsCmpImmediate(Instr instr) {
- return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
- (I | CMP | S);
-}
-
-
-Register Assembler::GetCmpImmediateRegister(Instr instr) {
- ASSERT(IsCmpImmediate(instr));
- return GetRn(instr);
-}
-
-
-int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
- ASSERT(IsCmpImmediate(instr));
- return instr & kOff12Mask;
-}
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the last
-// instruction using the label.
-
-
-// The link chain is terminated by a negative code position (must be aligned)
-const int kEndOfChain = -4;
-
-
-int Assembler::target_at(int pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
- // Emitted label constant, not part of a branch.
- return instr - (Code::kHeaderSize - kHeapObjectTag);
- }
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- int imm26 = ((instr & kImm24Mask) << 8) >> 6;
- if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
- ((instr & B24) != 0)) {
- // blx uses bit 24 to encode bit 2 of imm26
- imm26 += 2;
- }
- return pos + kPcLoadDelta + imm26;
-}
-
-
-void Assembler::target_at_put(int pos, int target_pos) {
- Instr instr = instr_at(pos);
- if ((instr & ~kImm24Mask) == 0) {
- ASSERT(target_pos == kEndOfChain || target_pos >= 0);
- // Emitted label constant, not part of a branch.
- // Make label relative to Code* of generated Code object.
- instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- return;
- }
- int imm26 = target_pos - (pos + kPcLoadDelta);
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- if (Instruction::ConditionField(instr) == kSpecialCondition) {
- // blx uses bit 24 to encode bit 2 of imm26
- ASSERT((imm26 & 1) == 0);
- instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
- } else {
- ASSERT((imm26 & 3) == 0);
- instr &= ~kImm24Mask;
- }
- int imm24 = imm26 >> 2;
- ASSERT(is_int24(imm24));
- instr_at_put(pos, instr | (imm24 & kImm24Mask));
-}
-
-
-void Assembler::print(Label* L) {
- if (L->is_unused()) {
- PrintF("unused label\n");
- } else if (L->is_bound()) {
- PrintF("bound label to %d\n", L->pos());
- } else if (L->is_linked()) {
- Label l = *L;
- PrintF("unbound label");
- while (l.is_linked()) {
- PrintF("@ %d ", l.pos());
- Instr instr = instr_at(l.pos());
- if ((instr & ~kImm24Mask) == 0) {
- PrintF("value\n");
- } else {
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
- Condition cond = Instruction::ConditionField(instr);
- const char* b;
- const char* c;
- if (cond == kSpecialCondition) {
- b = "blx";
- c = "";
- } else {
- if ((instr & B24) != 0)
- b = "bl";
- else
- b = "b";
-
- switch (cond) {
- case eq: c = "eq"; break;
- case ne: c = "ne"; break;
- case hs: c = "hs"; break;
- case lo: c = "lo"; break;
- case mi: c = "mi"; break;
- case pl: c = "pl"; break;
- case vs: c = "vs"; break;
- case vc: c = "vc"; break;
- case hi: c = "hi"; break;
- case ls: c = "ls"; break;
- case ge: c = "ge"; break;
- case lt: c = "lt"; break;
- case gt: c = "gt"; break;
- case le: c = "le"; break;
- case al: c = ""; break;
- default:
- c = "";
- UNREACHABLE();
- }
- }
- PrintF("%s%s\n", b, c);
- }
- next(&l);
- }
- } else {
- PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
- }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
- while (L->is_linked()) {
- int fixup_pos = L->pos();
- next(L); // call next before overwriting link with target at fixup_pos
- target_at_put(fixup_pos, pos);
- }
- L->bind_to(pos);
-
- // Keep track of the last bound label so we don't eliminate any instructions
- // before a bound label.
- if (pos > last_bound_pos_)
- last_bound_pos_ = pos;
-}
-
-
-void Assembler::link_to(Label* L, Label* appendix) {
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // Append appendix to L's list.
- int fixup_pos;
- int link = L->pos();
- do {
- fixup_pos = link;
- link = target_at(fixup_pos);
- } while (link > 0);
- ASSERT(link == kEndOfChain);
- target_at_put(fixup_pos, appendix->pos());
- } else {
- // L is empty, simply use appendix.
- *L = *appendix;
- }
- }
- appendix->Unuse(); // appendix should not be used anymore
-}
-
-
-void Assembler::bind(Label* L) {
- ASSERT(!L->is_bound()); // label can only be bound once
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::next(Label* L) {
- ASSERT(L->is_linked());
- int link = target_at(L->pos());
- if (link == kEndOfChain) {
- L->Unuse();
- } else {
- ASSERT(link >= 0);
- L->link_to(link);
- }
-}
-
-
-// Low-level code emission routines depending on the addressing mode.
-// If this returns true then you have to use the rotate_imm and immed_8
-// that it returns, because it may have already changed the instruction
-// to match them!
-static bool fits_shifter(uint32_t imm32,
- uint32_t* rotate_imm,
- uint32_t* immed_8,
- Instr* instr) {
- // imm32 must be unsigned.
- for (int rot = 0; rot < 16; rot++) {
- uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
- if ((imm8 <= 0xff)) {
- *rotate_imm = rot;
- *immed_8 = imm8;
- return true;
- }
- }
- // If the opcode is one with a complementary version and the complementary
- // immediate fits, change the opcode.
- if (instr != NULL) {
- if ((*instr & kMovMvnMask) == kMovMvnPattern) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= kMovMvnFlip;
- return true;
- } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
- if (CpuFeatures::IsSupported(ARMv7)) {
- if (imm32 < 0x10000) {
- *instr ^= kMovwLeaveCCFlip;
- *instr |= EncodeMovwImmediate(imm32);
- *rotate_imm = *immed_8 = 0; // Not used for movw.
- return true;
- }
- }
- }
- } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
- if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
- *instr ^= kCmpCmnFlip;
- return true;
- }
- } else {
- Instr alu_insn = (*instr & kALUMask);
- if (alu_insn == ADD ||
- alu_insn == SUB) {
- if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
- *instr ^= kAddSubFlip;
- return true;
- }
- } else if (alu_insn == AND ||
- alu_insn == BIC) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= kAndBicFlip;
- return true;
- }
- }
- }
- }
- return false;
-}
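
fits_shifter() above tests whether a 32-bit constant is expressible as an ARM data-processing immediate: an 8-bit value rotated right by an even amount 2*rot. A standalone version of just that encodability test (illustrative; the r == 0 case is handled separately here to avoid a 32-bit shift):

    #include <cassert>
    #include <cstdint>

    bool FitsArmImmediate(uint32_t imm32) {
      for (uint32_t rot = 0; rot < 16; rot++) {
        // If rotating imm32 left by 2*rot fits in 8 bits, then
        // imm32 == ROR(imm8, 2*rot) and the constant is encodable.
        uint32_t r = 2 * rot;
        uint32_t imm8 = (r == 0) ? imm32 : ((imm32 << r) | (imm32 >> (32 - r)));
        if (imm8 <= 0xff) return true;
      }
      return false;
    }

    int main() {
      assert(FitsArmImmediate(0xff));        // 0xff, no rotation
      assert(FitsArmImmediate(0xff000000));  // 0xff rotated right by 8
      assert(!FitsArmImmediate(0x101));      // set bits span 9 bits
      return 0;
    }
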
-
-
-// We have to use the temporary register for things that can be relocated even
-// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
-// space. There is no guarantee that the relocated location can be similarly
-// encoded.
-bool Operand::must_output_reloc_info(const Assembler* assembler) const {
- if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif // def DEBUG
- if (assembler != NULL && assembler->predictable_code_size()) return true;
- return Serializer::enabled();
- } else if (RelocInfo::IsNone(rmode_)) {
- return false;
- }
- return true;
-}
-
-
-static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
- if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
- return true;
- }
- if (x.must_output_reloc_info(assembler)) {
- return false;
- }
- return CpuFeatures::IsSupported(ARMv7);
-}
-
-
-bool Operand::is_single_instruction(const Assembler* assembler,
- Instr instr) const {
- if (rm_.is_valid()) return true;
- uint32_t dummy1, dummy2;
- if (must_output_reloc_info(assembler) ||
- !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
- // The immediate operand cannot be encoded as a shifter operand, or use of
- // constant pool is required. For a mov instruction not setting the
- // condition code additional instruction conventions can be used.
- if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- return !use_movw_movt(*this, assembler);
- } else {
- // If this is not a mov or mvn instruction there will always be an
- // additional instruction - either a mov or an ldr. The mov might actually
- // be two instructions (movw followed by movt), so including the actual
- // instruction, two or three instructions will be generated.
- return false;
- }
- } else {
- // No use of constant pool and the immediate operand can be encoded as a
- // shifter operand.
- return true;
- }
-}
-
-
-void Assembler::move_32_bit_immediate(Condition cond,
- Register rd,
- SBit s,
- const Operand& x) {
- if (rd.code() != pc.code() && s == LeaveCC) {
- if (use_movw_movt(x, this)) {
- if (x.must_output_reloc_info(this)) {
- RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
- // Make sure the movw/movt doesn't get separated.
- BlockConstPoolFor(2);
- }
- emit(cond | 0x30*B20 | rd.code()*B12 |
- EncodeMovwImmediate(x.imm32_ & 0xffff));
- movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
- return;
- }
- }
-
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(rd, MemOperand(pc, 0), cond);
-}
-
-
-void Assembler::addrmod1(Instr instr,
- Register rn,
- Register rd,
- const Operand& x) {
- CheckBuffer();
- ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
- if (!x.rm_.is_valid()) {
- // Immediate.
- uint32_t rotate_imm;
- uint32_t immed_8;
- if (x.must_output_reloc_info(this) ||
- !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
- // The immediate operand cannot be encoded as a shifter operand, so load
- // it first to register ip and change the original instruction to use ip.
- // However, if the original instruction is a 'mov rd, x' (not setting the
- // condition code), then replace it with a 'ldr rd, [pc]'.
- CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
- Condition cond = Instruction::ConditionField(instr);
- if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- move_32_bit_immediate(cond, rd, LeaveCC, x);
- } else {
- if ((instr & kMovMvnMask) == kMovMvnPattern) {
- // Moves need to use a constant pool entry.
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(ip, MemOperand(pc, 0), cond);
- } else if (x.must_output_reloc_info(this)) {
- // Otherwise, use most efficient form of fetching from constant pool.
- move_32_bit_immediate(cond, ip, LeaveCC, x);
- } else {
- // If this is not a mov or mvn instruction we may still be able to
- // avoid a constant pool entry by using mvn or movw.
- mov(ip, x, LeaveCC, cond);
- }
- addrmod1(instr, rn, rd, Operand(ip));
- }
- return;
- }
- instr |= I | rotate_imm*B8 | immed_8;
- } else if (!x.rs_.is_valid()) {
- // Immediate shift.
- instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
- } else {
- // Register shift.
- ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
- instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
- }
- emit(instr | rn.code()*B16 | rd.code()*B12);
- if (rn.is(pc) || x.rm_.is(pc)) {
- // Block constant pool emission for one instruction after reading pc.
- BlockConstPoolFor(1);
- }
-}
-
-
-void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(kCondMask | B | L)) == B26);
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // Immediate offset.
- int offset_12 = x.offset_;
- if (offset_12 < 0) {
- offset_12 = -offset_12;
- am ^= U;
- }
- if (!is_uint12(offset_12)) {
- // Immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
- addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- }
- ASSERT(offset_12 >= 0); // no masking needed
- instr |= offset_12;
- } else {
- // Register offset (shift_imm_ and shift_op_ are 0) or scaled
- // register offset; the constructors make sure that both shift_imm_
- // and shift_op_ are initialized.
- ASSERT(!x.rm_.is(pc));
- instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
- }
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
-
-
-void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
- ASSERT(x.rn_.is_valid());
- int am = x.am_;
- if (!x.rm_.is_valid()) {
- // Immediate offset.
- int offset_8 = x.offset_;
- if (offset_8 < 0) {
- offset_8 = -offset_8;
- am ^= U;
- }
- if (!is_uint8(offset_8)) {
- // Immediate offset cannot be encoded, load it first to register ip
- // rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
- addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- }
- ASSERT(offset_8 >= 0); // no masking needed
- instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
- } else if (x.shift_imm_ != 0) {
- // Scaled register offset not supported, load index first
- // rn (and rd in a load) should never be ip, or will be trashed.
- ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
- Instruction::ConditionField(instr));
- addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
- return;
- } else {
- // Register offset.
- ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
- instr |= x.rm_.code();
- }
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
- emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
-
-
-void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
- ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
- ASSERT(rl != 0);
- ASSERT(!rn.is(pc));
- emit(instr | rn.code()*B16 | rl);
-}
-
-
-void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
- // Unindexed addressing is not encoded by this function.
- ASSERT_EQ((B27 | B26),
- (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
- ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
- int am = x.am_;
- int offset_8 = x.offset_;
- ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
- offset_8 >>= 2;
- if (offset_8 < 0) {
- offset_8 = -offset_8;
- am ^= U;
- }
- ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
- ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
-
- // Post-indexed addressing requires W == 1; different than in addrmod2/3.
- if ((am & P) == 0)
- am |= W;
-
- ASSERT(offset_8 >= 0); // no masking needed
- emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
-}
-
-
-int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link
- } else {
- target_pos = kEndOfChain;
- }
- L->link_to(pc_offset());
- }
-
- // Block the emission of the constant pool, since the branch instruction must
- // be emitted at the pc offset recorded by the label.
- BlockConstPoolFor(1);
- return target_pos - (pc_offset() + kPcLoadDelta);
-}
-
-
-void Assembler::label_at_put(Label* L, int at_offset) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link
- } else {
- target_pos = kEndOfChain;
- }
- L->link_to(at_offset);
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- }
-}
-
-
-// Branch instructions.
-void Assembler::b(int branch_offset, Condition cond) {
- ASSERT((branch_offset & 3) == 0);
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | (imm24 & kImm24Mask));
-
- if (cond == al) {
- // Dead code is a good location to emit the constant pool.
- CheckConstPool(false, false);
- }
-}
-
-
-void Assembler::bl(int branch_offset, Condition cond) {
- positions_recorder()->WriteRecordedPositions();
- ASSERT((branch_offset & 3) == 0);
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
-}
-
-
-void Assembler::blx(int branch_offset) { // v5 and above
- positions_recorder()->WriteRecordedPositions();
- ASSERT((branch_offset & 1) == 0);
- int h = ((branch_offset & 2) >> 1)*B24;
- int imm24 = branch_offset >> 2;
- ASSERT(is_int24(imm24));
- emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
-}
-
-
-void Assembler::blx(Register target, Condition cond) { // v5 and above
- positions_recorder()->WriteRecordedPositions();
- ASSERT(!target.is(pc));
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
-}
-
-
-void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
- positions_recorder()->WriteRecordedPositions();
- ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
-}
-
-
-// Data-processing instructions.
-
-void Assembler::and_(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | AND | s, src1, dst, src2);
-}
-
-
-void Assembler::eor(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | EOR | s, src1, dst, src2);
-}
-
-
-void Assembler::sub(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | SUB | s, src1, dst, src2);
-}
-
-
-void Assembler::rsb(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | RSB | s, src1, dst, src2);
-}
-
-
-void Assembler::add(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | ADD | s, src1, dst, src2);
-}
-
-
-void Assembler::adc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | ADC | s, src1, dst, src2);
-}
-
-
-void Assembler::sbc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | SBC | s, src1, dst, src2);
-}
-
-
-void Assembler::rsc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | RSC | s, src1, dst, src2);
-}
-
-
-void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | TST | S, src1, r0, src2);
-}
-
-
-void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | TEQ | S, src1, r0, src2);
-}
-
-
-void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | CMP | S, src1, r0, src2);
-}
-
-
-void Assembler::cmp_raw_immediate(
- Register src, int raw_immediate, Condition cond) {
- ASSERT(is_uint12(raw_immediate));
- emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
-}
-
-
-void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | CMN | S, src1, r0, src2);
-}
-
-
-void Assembler::orr(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | ORR | s, src1, dst, src2);
-}
-
-
-void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
- if (dst.is(pc)) {
- positions_recorder()->WriteRecordedPositions();
- }
- // Don't allow nop instructions in the form mov rn, rn to be generated using
- // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
- // or MarkCode(int/NopMarkerTypes) pseudo instructions.
- ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
- addrmod1(cond | MOV | s, r0, dst, src);
-}
-
-
-void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
- ASSERT(immediate < 0x10000);
- // May use movw if supported, but on unsupported platforms will try to use
- // equivalent rotated immed_8 value and other tricks before falling back to a
- // constant pool load.
- mov(reg, Operand(immediate), LeaveCC, cond);
-}
-
-
-void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
- emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
-}
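-
-// Illustrative usage: a movw/movt pair loads a full 32-bit immediate,
-// e.g. for 0xDEADBEEF:
-// movw ip, #0xBEEF  @ ip = 0x0000BEEF (upper half cleared)
-// movt ip, #0xDEAD  @ ip = 0xDEADBEEF (lower half preserved)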
-
-
-void Assembler::bic(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
- addrmod1(cond | BIC | s, src1, dst, src2);
-}
-
-
-void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
- addrmod1(cond | MVN | s, r0, dst, src);
-}
-
-
-// Multiply instructions.
-void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
- SBit s, Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
- emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
- Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
- emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::sdiv(Register dst, Register src1, Register src2,
- Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
- emit(cond | B26 | B25 | B24 | B20 | dst.code()*B16 | 0xf*B12 |
- src2.code()*B8 | B4 | src1.code());
-}
-
-
-void Assembler::mul(Register dst, Register src1, Register src2,
- SBit s, Condition cond) {
- ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
- // dst goes in bits 16-19 for this instruction!
- emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smlal(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smull(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umlal(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umull(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
- ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH));
- emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-// Miscellaneous arithmetic instructions.
-void Assembler::clz(Register dst, Register src, Condition cond) {
- // v5 and above.
- ASSERT(!dst.is(pc) && !src.is(pc));
- emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
- 15*B8 | CLZ | src.code());
-}
-
-
-// Saturating instructions.
-
-// Unsigned saturate.
-void Assembler::usat(Register dst,
- int satpos,
- const Operand& src,
- Condition cond) {
- // v6 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.rm_.is(pc));
- ASSERT((satpos >= 0) && (satpos <= 31));
- ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
- ASSERT(src.rs_.is(no_reg));
-
- int sh = 0;
- if (src.shift_op_ == ASR) {
- sh = 1;
- }
-
- emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
- src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
-}
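-
-// Illustrative usage: usat r0, #8, r1 clamps r1 into [0, 255] and writes the
-// result to r0; an optional LSL or ASR shift is applied to the operand first,
-// e.g. usat r0, #8, r1, asr #4.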
-
-
-// Bitfield manipulation instructions.
-
-// Unsigned bit field extract.
-// Extracts #width adjacent bits from position #lsb in a register, and
-// writes them to the low bits of a destination register.
-// ubfx dst, src, #lsb, #width
-void Assembler::ubfx(Register dst,
- Register src,
- int lsb,
- int width,
- Condition cond) {
- // v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
- emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
- lsb*B7 | B6 | B4 | src.code());
-}
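-
-// Illustrative usage: ubfx r0, r1, #8, #8 yields r0 = (r1 >> 8) & 0xff.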
-
-
-// Signed bit field extract.
-// Extracts #width adjacent bits from position #lsb in a register, and
-// writes them to the low bits of a destination register. The extracted
-// value is sign extended to fill the destination register.
-// sbfx dst, src, #lsb, #width
-void Assembler::sbfx(Register dst,
- Register src,
- int lsb,
- int width,
- Condition cond) {
- // v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
- emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
- lsb*B7 | B6 | B4 | src.code());
-}
-
-
-// Bit field clear.
-// Sets #width adjacent bits at position #lsb in the destination register
-// to zero, preserving the value of the other bits.
-// bfc dst, #lsb, #width
-void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
- // v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
- int msb = lsb + width - 1;
- emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
-}
-
-
-// Bit field insert.
-// Inserts #width adjacent bits from the low bits of the source register
-// into position #lsb of the destination register.
-// bfi dst, src, #lsb, #width
-void Assembler::bfi(Register dst,
- Register src,
- int lsb,
- int width,
- Condition cond) {
- // v7 and above.
- ASSERT(CpuFeatures::IsSupported(ARMv7));
- ASSERT(!dst.is(pc) && !src.is(pc));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width >= 1) && (width <= (32 - lsb)));
- int msb = lsb + width - 1;
- emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
- src.code());
-}
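-
-// Illustrative usage: bfc r0, #4, #8 clears bits 11:4 of r0, while
-// bfi r0, r1, #4, #8 replaces bits 11:4 of r0 with the low 8 bits of r1.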
-
-
-// Status register access instructions.
-void Assembler::mrs(Register dst, SRegister s, Condition cond) {
- ASSERT(!dst.is(pc));
- emit(cond | B24 | s | 15*B16 | dst.code()*B12);
-}
-
-
-void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
- Condition cond) {
- ASSERT(fields >= B16 && fields < B20); // at least one field set
- Instr instr;
- if (!src.rm_.is_valid()) {
- // Immediate.
- uint32_t rotate_imm;
- uint32_t immed_8;
- if (src.must_output_reloc_info(this) ||
- !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
- // Immediate operand cannot be encoded, load it first to register ip.
- RecordRelocInfo(src.rmode_, src.imm32_);
- ldr(ip, MemOperand(pc, 0), cond);
- msr(fields, Operand(ip), cond);
- return;
- }
- instr = I | rotate_imm*B8 | immed_8;
- } else {
- ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
- instr = src.rm_.code();
- }
- emit(cond | instr | B24 | B21 | fields | 15*B12);
-}
-
-
-// Load/Store instructions.
-void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
- if (dst.is(pc)) {
- positions_recorder()->WriteRecordedPositions();
- }
- addrmod2(cond | B26 | L, dst, src);
-}
-
-
-void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
- addrmod2(cond | B26, src, dst);
-}
-
-
-void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
- addrmod2(cond | B26 | B | L, dst, src);
-}
-
-
-void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
- addrmod2(cond | B26 | B, src, dst);
-}
-
-
-void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | H | B4, dst, src);
-}
-
-
-void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
- addrmod3(cond | B7 | H | B4, src, dst);
-}
-
-
-void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | S6 | B4, dst, src);
-}
-
-
-void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
- addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
-}
-
-
-void Assembler::ldrd(Register dst1, Register dst2,
- const MemOperand& src, Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(ARMv7));
- ASSERT(src.rm().is(no_reg));
- ASSERT(!dst1.is(lr)); // r14.
- ASSERT_EQ(0, dst1.code() % 2);
- ASSERT_EQ(dst1.code() + 1, dst2.code());
- addrmod3(cond | B7 | B6 | B4, dst1, src);
-}
-
-
-void Assembler::strd(Register src1, Register src2,
- const MemOperand& dst, Condition cond) {
- ASSERT(dst.rm().is(no_reg));
- ASSERT(!src1.is(lr)); // r14.
- ASSERT_EQ(0, src1.code() % 2);
- ASSERT_EQ(src1.code() + 1, src2.code());
- ASSERT(CpuFeatures::IsEnabled(ARMv7));
- addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
-}
-
-// Load/Store multiple instructions.
-void Assembler::ldm(BlockAddrMode am,
- Register base,
- RegList dst,
- Condition cond) {
- // ABI stack constraint: ldmxx base, {..sp..} with base != sp is not restartable.
- ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
-
- addrmod4(cond | B27 | am | L, base, dst);
-
- // Emit the constant pool after a function return implemented by ldm ..{..pc}.
- if (cond == al && (dst & pc.bit()) != 0) {
- // There is a slight chance that the ldm instruction was actually a call,
- // in which case it would be wrong to return into the constant pool; we
- // recognize this case by checking if the emission of the pool was blocked
- // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
- // the case, we emit a jump over the pool.
- CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
- }
-}
-
-
-void Assembler::stm(BlockAddrMode am,
- Register base,
- RegList src,
- Condition cond) {
- addrmod4(cond | B27 | am, base, src);
-}
-
-
-// Exception-generating instructions and debugging support.
-// Stops with a non-negative code less than kNumOfWatchedStops support
-// enabling/disabling and a counter feature; see simulator-arm.h.
-void Assembler::stop(const char* msg, Condition cond, int32_t code) {
-#ifndef __arm__
- ASSERT(code >= kDefaultStopCode);
- {
- // The Simulator will handle the stop instruction and get the message
- // address. It expects to find the address just after the svc instruction.
- BlockConstPoolScope block_const_pool(this);
- if (code >= 0) {
- svc(kStopCode + code, cond);
- } else {
- svc(kStopCode + kMaxStopCode, cond);
- }
- emit(reinterpret_cast<Instr>(msg));
- }
-#else // def __arm__
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
- if (cond != al) {
- Label skip;
- b(&skip, NegateCondition(cond));
- bkpt(0);
- bind(&skip);
- } else {
- bkpt(0);
- }
-#else // ndef CAN_USE_ARMV5_INSTRUCTIONS
- svc(0x9f0001, cond);
-#endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
-#endif // def __arm__
-}
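-
-// Illustrative usage: stop("unreachable code") traps in the simulator and
-// prints the message; on real hardware the message is dropped and the stop
-// degrades to a (possibly conditional) bkpt 0, or an svc on pre-v5 cores.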
-
-
-void Assembler::bkpt(uint32_t imm16) { // v5 and above
- ASSERT(is_uint16(imm16));
- emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
-}
-
-
-void Assembler::svc(uint32_t imm24, Condition cond) {
- ASSERT(is_uint24(imm24));
- emit(cond | 15*B24 | imm24);
-}
-
-
-// Coprocessor instructions.
-void Assembler::cdp(Coprocessor coproc,
- int opcode_1,
- CRegister crd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
- crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
-}
-
-
-void Assembler::cdp2(Coprocessor coproc,
- int opcode_1,
- CRegister crd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
-}
-
-
-void Assembler::mcr(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
- rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mcr2(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
-}
-
-
-void Assembler::mrc(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
- Condition cond) {
- ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
- rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mrc2(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2) { // v5 and above
- mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
- CRegister crd,
- const MemOperand& src,
- LFlag l,
- Condition cond) {
- addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l,
- Condition cond) {
- // Unindexed addressing.
- ASSERT(is_uint8(option));
- emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
- coproc*B8 | (option & 255));
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
- CRegister crd,
- const MemOperand& src,
- LFlag l) { // v5 and above
- ldc(coproc, crd, src, l, kSpecialCondition);
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l) { // v5 and above
- ldc(coproc, crd, rn, option, l, kSpecialCondition);
-}
-
-
-// Support for VFP.
-
-void Assembler::vldr(const DwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond) {
- // Ddst = MEM(Rbase + offset).
- // Instruction details available in ARM DDI 0406C.b, A8-924.
- // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
- // Vd(15-12) | 1011(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int u = 1;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
- int vd, d;
- dst.split_code(&vd, &d);
-
- ASSERT(offset >= 0);
- if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
- 0xB*B8 | ((offset / 4) & 255));
- } else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- ASSERT(!base.is(ip));
- if (u == 1) {
- add(ip, base, Operand(offset));
- } else {
- sub(ip, base, Operand(offset));
- }
- emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
- }
-}
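-
-// Illustrative note: the directly encodable offsets are multiples of 4 up to
-// 1020 in magnitude (imm8 * 4, with the U bit as the sign), e.g.
-// vldr d0, [r1, #1020] encodes imm8 = 255, while an offset of 1024 takes the
-// add/sub-into-ip path above.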
-
-
-void Assembler::vldr(const DwVfpRegister dst,
- const MemOperand& operand,
- const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vldr(dst, operand.rn(), operand.offset(), cond);
-}
-
-
-void Assembler::vldr(const SwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond) {
- // Sdst = MEM(Rbase + offset).
- // Instruction details available in ARM DDI 0406A, A8-628.
- // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1010(11-8) | offset
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int u = 1;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
- int sd, d;
- dst.split_code(&sd, &d);
- ASSERT(offset >= 0);
-
- if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | ((offset / 4) & 255));
- } else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- ASSERT(!base.is(ip));
- if (u == 1) {
- add(ip, base, Operand(offset));
- } else {
- sub(ip, base, Operand(offset));
- }
- emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
- }
-}
-
-
-void Assembler::vldr(const SwVfpRegister dst,
- const MemOperand& operand,
- const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vldr(dst, operand.rn(), operand.offset(), cond);
-}
-
-
-void Assembler::vstr(const DwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond) {
- // MEM(Rbase + offset) = Dsrc.
- // Instruction details available in ARM DDI 0406C.b, A8-1082.
- // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
- // Vd(15-12) | 1011(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int u = 1;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
- ASSERT(offset >= 0);
- int vd, d;
- src.split_code(&vd, &d);
-
- if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
- ((offset / 4) & 255));
- } else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- ASSERT(!base.is(ip));
- if (u == 1) {
- add(ip, base, Operand(offset));
- } else {
- sub(ip, base, Operand(offset));
- }
- emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
- }
-}
-
-
-void Assembler::vstr(const DwVfpRegister src,
- const MemOperand& operand,
- const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vstr(src, operand.rn(), operand.offset(), cond);
-}
-
-
-void Assembler::vstr(const SwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond) {
- // MEM(Rbase + offset) = Ssrc.
- // Instruction details available in ARM DDI 0406A, A8-786.
- // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1010(11-8) | (offset/4)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int u = 1;
- if (offset < 0) {
- offset = -offset;
- u = 0;
- }
- int sd, d;
- src.split_code(&sd, &d);
- ASSERT(offset >= 0);
- if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | ((offset / 4) & 255));
- } else {
- // Larger offsets must be handled by computing the correct address
- // in the ip register.
- ASSERT(!base.is(ip));
- if (u == 1) {
- add(ip, base, Operand(offset));
- } else {
- sub(ip, base, Operand(offset));
- }
- emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
- }
-}
-
-
-void Assembler::vstr(const SwVfpRegister src,
- const MemOperand& operand,
- const Condition cond) {
- ASSERT(!operand.rm().is_valid());
- ASSERT(operand.am_ == Offset);
- vstr(src, operand.rn(), operand.offset(), cond);
-}
-
-
-void Assembler::vldm(BlockAddrMode am,
- Register base,
- DwVfpRegister first,
- DwVfpRegister last,
- Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-922.
- // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
- // first(15-12) | 1011(11-8) | (count * 2)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT_LE(first.code(), last.code());
- ASSERT(am == ia || am == ia_w || am == db_w);
- ASSERT(!base.is(pc));
-
- int sd, d;
- first.split_code(&sd, &d);
- int count = last.code() - first.code() + 1;
- ASSERT(count <= 16);
- emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
- 0xB*B8 | count*2);
-}
-
-
-void Assembler::vstm(BlockAddrMode am,
- Register base,
- DwVfpRegister first,
- DwVfpRegister last,
- Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-1080.
- // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
- // first(15-12) | 1011(11-8) | (count * 2)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT_LE(first.code(), last.code());
- ASSERT(am == ia || am == ia_w || am == db_w);
- ASSERT(!base.is(pc));
-
- int sd, d;
- first.split_code(&sd, &d);
- int count = last.code() - first.code() + 1;
- ASSERT(count <= 16);
- emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
- 0xB*B8 | count*2);
-}
-
-void Assembler::vldm(BlockAddrMode am,
- Register base,
- SwVfpRegister first,
- SwVfpRegister last,
- Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-626.
- // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
- // first(15-12) | 1010(11-8) | (count)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT_LE(first.code(), last.code());
- ASSERT(am == ia || am == ia_w || am == db_w);
- ASSERT(!base.is(pc));
-
- int sd, d;
- first.split_code(&sd, &d);
- int count = last.code() - first.code() + 1;
- emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | count);
-}
-
-
-void Assembler::vstm(BlockAddrMode am,
- Register base,
- SwVfpRegister first,
- SwVfpRegister last,
- Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
- // first(15-12) | 1010(11-8) | (count)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT_LE(first.code(), last.code());
- ASSERT(am == ia || am == ia_w || am == db_w);
- ASSERT(!base.is(pc));
-
- int sd, d;
- first.split_code(&sd, &d);
- int count = last.code() - first.code() + 1;
- emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
- 0xA*B8 | count);
-}
-
-static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
- uint64_t i;
- memcpy(&i, &d, 8);
-
- *lo = i & 0xffffffff;
- *hi = i >> 32;
-}
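-
-// For example, 1.0 has the IEEE-754 bit pattern 0x3FF0000000000000 and splits
-// into lo = 0x00000000 and hi = 0x3FF00000.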
-
-// Only works for little-endian floating point formats.
-// We don't support VFP on mixed-endian floating point platforms.
-static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
- ASSERT(CpuFeatures::IsSupported(VFP3));
-
- // VMOV can accept an immediate of the form:
- //
- // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
- //
- // The immediate is encoded using an 8-bit quantity, comprised of two
- // 4-bit fields. For an 8-bit immediate of the form:
- //
- // [abcdefgh]
- //
- // where a is the MSB and h is the LSB, an immediate 64-bit double can be
- // created of the form:
- //
- // [aBbbbbbb,bbcdefgh,00000000,00000000,
- // 00000000,00000000,00000000,00000000]
- //
- // where B = ~b.
- //
-
- uint32_t lo, hi;
- DoubleAsTwoUInt32(d, &lo, &hi);
-
- // The most obvious constraint is the long block of zeroes.
- if ((lo != 0) || ((hi & 0xffff) != 0)) {
- return false;
- }
-
- // Bits 62:55 must be all clear or all set.
- if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
- return false;
- }
-
- // Bit 63 must be NOT bit 62.
- if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
- return false;
- }
-
- // Create the encoded immediate in the form:
- // [00000000,0000abcd,00000000,0000efgh]
- *encoding = (hi >> 16) & 0xf; // Low nybble.
- *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
- *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
-
- return true;
-}
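-
-// Worked example: for d = 1.0 (hi = 0x3FF00000, lo = 0) the pattern
-// [abcdefgh] is 01110000, i.e. m = 16 and n = 4 (16 * 2^-4 = 1.0), and the
-// returned encoding is 0x70000 (abcd = 0111 in bits 19-16, efgh = 0000).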
-
-
-void Assembler::vmov(const DwVfpRegister dst,
- double imm,
- const Register scratch) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
-
- uint32_t enc;
- if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
- // The double can be encoded in the instruction.
- //
- // Dd = immediate
- // Instruction details available in ARM DDI 0406C.b, A8-936.
- // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
- int vd, d;
- dst.split_code(&vd, &d);
- emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm) {
- // TODO(jfb) Temporarily turned off until we have constant blinding or
- // some equivalent mitigation: an attacker can otherwise control
- // generated data which also happens to be executable, a Very Bad
- // Thing indeed.
- // Blinding gets tricky because we don't have xor, we probably
- // need to add/subtract without losing precision, which requires a
- // cookie value that Lithium is probably better positioned to
- // choose.
- // We could also add a few peepholes here like detecting 0.0 and
- // -0.0 and doing a vmov from the sequestered d14, forcing denorms
- // to zero (we set flush-to-zero), and normalizing NaN values.
- // We could also detect redundant values.
- // The code could also randomize the order of values, though
- // that's tricky because vldr has a limited reach. Furthermore
- // it breaks load locality.
- RecordRelocInfo(imm);
- vldr(dst, MemOperand(pc, 0));
- } else {
- // Synthesise the double from ARM immediates.
- uint32_t lo, hi;
- DoubleAsTwoUInt32(imm, &lo, &hi);
-
- if (scratch.is(no_reg)) {
- if (dst.code() < 16) {
- // Move the low part of the double into the lower of the corresponding
- // S registers of D register dst.
- mov(ip, Operand(lo));
- vmov(dst.low(), ip);
-
- // Move the high part of the double into the higher of the
- // corresponding S registers of D register dst.
- mov(ip, Operand(hi));
- vmov(dst.high(), ip);
- } else {
- // D16-D31 does not have S registers, so move the low and high parts
- // directly to the D register using vmov.32.
- // Note: This may be slower, so we only do this when we have to.
- mov(ip, Operand(lo));
- vmov(dst, VmovIndexLo, ip);
- mov(ip, Operand(hi));
- vmov(dst, VmovIndexHi, ip);
- }
- } else {
- // Move the low and high parts of the double to a D register in one
- // instruction.
- mov(ip, Operand(lo));
- mov(scratch, Operand(hi));
- vmov(dst, ip, scratch);
- }
- }
-}
-
-
-void Assembler::vmov(const SwVfpRegister dst,
- const SwVfpRegister src,
- const Condition cond) {
- // Sd = Sm
- // Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int sd, d, sm, m;
- dst.split_code(&sd, &d);
- src.split_code(&sm, &m);
- emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- // Dd = Dm
- // Instruction details available in ARM DDI 0406C.b, A8-938.
- // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
- // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vm, m;
- src.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
- vm);
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
- const VmovIndex index,
- const Register src,
- const Condition cond) {
- // Dd[index] = Rt
- // Instruction details available in ARM DDI 0406C.b, A8-940.
- // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
- // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT(index.index == 0 || index.index == 1);
- int vd, d;
- dst.split_code(&vd, &d);
- emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
- d*B7 | B4);
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond) {
- // Dm = <Rt,Rt2>.
- // Instruction details available in ARM DDI 0406C.b, A8-948.
- // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
- // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT(!src1.is(pc) && !src2.is(pc));
- int vm, m;
- dst.split_code(&vm, &m);
- emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
- src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
-}
-
-
-void Assembler::vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond) {
- // <Rt,Rt2> = Dm.
- // Instruction details available in ARM DDI 0406C.b, A8-948.
- // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
- // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT(!dst1.is(pc) && !dst2.is(pc));
- int vm, m;
- src.split_code(&vm, &m);
- emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
- dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
-}
-
-
-void Assembler::vmov(const SwVfpRegister dst,
- const Register src,
- const Condition cond) {
- // Sn = Rt.
- // Instruction details available in ARM DDI 0406A, A8-642.
- // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
- // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT(!src.is(pc));
- int sn, n;
- dst.split_code(&sn, &n);
- emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
-}
-
-
-void Assembler::vmov(const Register dst,
- const SwVfpRegister src,
- const Condition cond) {
- // Rt = Sn.
- // Instruction details available in ARM DDI 0406A, A8-642.
- // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
- // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT(!dst.is(pc));
- int sn, n;
- src.split_code(&sn, &n);
- emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
-}
-
-
-// Type of data to read from or write to VFP register.
-// Used as specifier in generic vcvt instruction.
-enum VFPType { S32, U32, F32, F64 };
-
-
-static bool IsSignedVFPType(VFPType type) {
- switch (type) {
- case S32:
- return true;
- case U32:
- return false;
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-static bool IsIntegerVFPType(VFPType type) {
- switch (type) {
- case S32:
- case U32:
- return true;
- case F32:
- case F64:
- return false;
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-static bool IsDoubleVFPType(VFPType type) {
- switch (type) {
- case F32:
- return false;
- case F64:
- return true;
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-// Split five bit reg_code based on size of reg_type.
-// 32-bit register codes are Vm:M
-// 64-bit register codes are M:Vm
-// where Vm is four bits, and M is a single bit.
-static void SplitRegCode(VFPType reg_type,
- int reg_code,
- int* vm,
- int* m) {
- ASSERT((reg_code >= 0) && (reg_code <= 31));
- if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
- // 32 bit type.
- *m = reg_code & 0x1;
- *vm = reg_code >> 1;
- } else {
- // 64 bit type.
- *m = (reg_code & 0x10) >> 4;
- *vm = reg_code & 0x0F;
- }
-}
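-
-// For example, the 32-bit register s5 (code 5) splits into vm = 2, m = 1
-// (Vm:M = 0010:1), while the 64-bit register d5 (code 5) splits into
-// vm = 5, m = 0 (M:Vm = 0:0101).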
-
-
-// Encode vcvt.src_type.dst_type instruction.
-static Instr EncodeVCVT(const VFPType dst_type,
- const int dst_code,
- const VFPType src_type,
- const int src_code,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(src_type != dst_type);
- int D, Vd, M, Vm;
- SplitRegCode(src_type, src_code, &Vm, &M);
- SplitRegCode(dst_type, dst_code, &Vd, &D);
-
- if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
- // Conversion between IEEE floating point and 32-bit integer.
- // Instruction details available in ARM DDI 0406B, A8.6.295.
- // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
- // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
-
- int sz, opc2, op;
-
- if (IsIntegerVFPType(dst_type)) {
- opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
- sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
- op = mode;
- } else {
- ASSERT(IsIntegerVFPType(src_type));
- opc2 = 0x0;
- sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
- op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
- }
-
- return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
- Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
- } else {
- // Conversion between IEEE double and single precision.
- // Instruction details available in ARM DDI 0406B, A8.6.298.
- // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
- return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
- Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
- }
-}
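-
-// For example, vcvt_s32_f64 below selects opc2 = 0x5 (convert to signed
-// integer), sz = 1 (double-precision operand) and op = mode, which assembles
-// to a vcvt.s32.f64 instruction.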
-
-
-void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
-}
-
-
-void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
-}
-
-
-void Assembler::vneg(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-968.
- // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
- // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vm, m;
- src.split_code(&vm, &m);
-
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
- m*B5 | vm);
-}
-
-
-void Assembler::vabs(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-524.
- // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
- // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vm, m;
- src.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
- m*B5 | vm);
-}
-
-
-void Assembler::vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vadd(Dn, Dm) double precision floating point addition.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
- // Instruction details available in ARM DDI 0406C.b, A8-830.
- // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
- n*B7 | m*B5 | vm);
-}
-
-
-void Assembler::vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vsub(Dn, Dm) double precision floating point subtraction.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
- // Instruction details available in ARM DDI 0406C.b, A8-1086.
- // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
- n*B7 | B6 | m*B5 | vm);
-}
-
-
-void Assembler::vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vmul(Dn, Dm) double precision floating point multiplication.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
- // Instruction details available in ARM DDI 0406C.b, A8-960.
- // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
- n*B7 | m*B5 | vm);
-}
-
-
-void Assembler::vmla(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-932.
- // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
- vm);
-}
-
-
-void Assembler::vmls(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-932.
- // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
- m*B5 | vm);
-}
-
-
-void Assembler::vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Dd = vdiv(Dn, Dm) double precision floating point division.
- // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
- // Instruction details available in ARM DDI 0406C.b, A8-882.
- // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vn, n;
- src1.split_code(&vn, &n);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
- vm);
-}
-
-
-void Assembler::vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406C.b, A8-864.
- // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- src1.split_code(&vd, &d);
- int vm, m;
- src2.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
- m*B5 | vm);
-}
-
-
-void Assembler::vcmp(const DwVfpRegister src1,
- const double src2,
- const Condition cond) {
- // vcmp(Dd, #0.0) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406C.b, A8-864.
- // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- ASSERT(src2 == 0.0);
- int vd, d;
- src1.split_code(&vd, &d);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
-}
-
-
-void Assembler::vmsr(Register dst, Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-652.
- // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
- // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xE*B20 | B16 |
- dst.code()*B12 | 0xA*B8 | B4);
-}
-
-
-void Assembler::vmrs(Register dst, Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-652.
- // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
- // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xF*B20 | B16 |
- dst.code()*B12 | 0xA*B8 | B4);
-}
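-
-// Note: encoding Rt = 15 in vmrs transfers the FPSCR condition flags to the
-// APSR instead of writing a core register; that is how the result of a vcmp
-// becomes visible to conditional instructions.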
-
-
-void Assembler::vsqrt(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-1058.
- // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
- // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- int vd, d;
- dst.split_code(&vd, &d);
- int vm, m;
- src.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
- m*B5 | vm);
-}
-
-
-// Pseudo instructions.
-void Assembler::nop(int type) {
- // ARMv6{K/T2} and v7 have an actual NOP instruction, but it serializes
- // part of the CPU's pipeline and still has to issue. Older ARM chips simply
- // used MOV Rx, Rx as a NOP, and it performs better even on newer CPUs.
- // We therefore use MOV Rx, Rx even on newer CPUs, and use Rx to encode
- // a type.
- ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
- emit(al | 13*B21 | type*B12 | type);
-}
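-
-// For example, nop(0) assembles to mov r0, r0; the register index doubles as
-// the marker type that IsNop below recovers.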
-
-
-bool Assembler::IsMovT(Instr instr) {
- instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
- ((kNumRegisters-1)*B12) | // mask out register
- EncodeMovwImmediate(0xFFFF)); // mask out immediate value
- return instr == 0x34*B20;
-}
-
-
-bool Assembler::IsMovW(Instr instr) {
- instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
- ((kNumRegisters-1)*B12) | // mask out destination
- EncodeMovwImmediate(0xFFFF)); // mask out immediate value
- return instr == 0x30*B20;
-}
-
-
-bool Assembler::IsNop(Instr instr, int type) {
- ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop.
- // Check for mov rx, rx where x = type.
- return instr == (al | 13*B21 | type*B12 | type);
-}
-
-
-bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
- uint32_t dummy1;
- uint32_t dummy2;
- return fits_shifter(imm32, &dummy1, &dummy2, NULL);
-}
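-
-// For example, 0xff and 0xff00 fit (0xff with rotations of 0 and 24 bits),
-// while 0x12345678 does not and needs a movw/movt pair or a constant pool
-// load.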
-
-
-// Debugging.
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-void Assembler::RecordConstPool(int size) {
- // We only need this for debugger support, to correctly compute offsets in the
- // code.
-#ifdef ENABLE_DEBUGGER_SUPPORT
- RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
-#endif
-}
-
-void Assembler::GrowBuffer() {
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else if (buffer_size_ < 1*MB) {
- desc.buffer_size = 2*buffer_size_;
- } else {
- desc.buffer_size = buffer_size_ + 1*MB;
- }
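- // E.g. a 256KB buffer doubles to 512KB, while a 2MB buffer grows to 3MB.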
- CHECK_GT(desc.buffer_size, 0); // no overflow
-
- // Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
-
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-
- // Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- DeleteArray(buffer_);
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // None of our relocation types are pc relative pointing outside the code
- // buffer nor pc absolute pointing inside the code buffer, so there is no need
- // to relocate any emitted relocation entries.
-
- // Relocate pending relocation entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION);
- if (rinfo.rmode() != RelocInfo::JS_RETURN) {
- rinfo.set_pc(rinfo.pc() + pc_delta);
- }
- }
-}
-
-
-void Assembler::db(uint8_t data) {
- // No relocation info should be pending while using db. db is used
- // to write pure data with no pointers and the constant pool should
- // be emitted before using db.
- ASSERT(num_pending_reloc_info_ == 0);
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
- CheckBuffer();
- *reinterpret_cast<uint8_t*>(pc_) = data;
- pc_ += sizeof(uint8_t);
-}
-
-
-void Assembler::dd(uint32_t data) {
- // No relocation info should be pending while using dd. dd is used
- // to write pure data with no pointers and the constant pool should
- // be emitted before using dd.
- ASSERT(num_pending_reloc_info_ == 0);
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
- CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) = data;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
- UseConstantPoolMode mode) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, rmode, data, NULL);
- if (((rmode >= RelocInfo::JS_RETURN) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
- (rmode == RelocInfo::CONST_POOL) ||
- mode == DONT_USE_CONSTANT_POOL) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode)
- || RelocInfo::IsConstPool(rmode)
- || mode == DONT_USE_CONSTANT_POOL);
- // These modes do not need an entry in the constant pool.
- } else {
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
- }
- if (!RelocInfo::IsNone(rinfo.rmode())) {
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
- }
- ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_,
- rmode,
- RecordedAstId().ToInt(),
- NULL);
- ClearRecordedAstId();
- reloc_info_writer.Write(&reloc_info_with_ast_id);
- } else {
- reloc_info_writer.Write(&rinfo);
- }
- }
-}
-
-void Assembler::RecordRelocInfo(double data) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, data);
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
-}
-
-
-void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
- ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
- if (num_pending_reloc_info_ == 0) {
- first_const_pool_use_ = pc_offset();
- }
- pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
- if (rinfo.rmode() == RelocInfo::NONE64) {
- ++num_pending_64_bit_reloc_info_;
- }
- ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_);
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
-}
-
-
-void Assembler::BlockConstPoolFor(int instructions) {
- int pc_limit = pc_offset() + instructions * kInstrSize;
- if (no_const_pool_before_ < pc_limit) {
- // If there are some pending entries, the constant pool cannot be blocked
- // further than constant pool instruction's reach.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_limit - first_const_pool_use_ < kMaxDistToIntPool));
- // TODO(jfb) Also check 64-bit entries are in range (requires splitting
- // them up from 32-bit entries).
- no_const_pool_before_ = pc_limit;
- }
-
- if (next_buffer_check_ < no_const_pool_before_) {
- next_buffer_check_ = no_const_pool_before_;
- }
-}
-
-
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- // Some short sequences of instructions mustn't be broken up by constant pool
- // emission; such sequences are protected by calls to BlockConstPoolFor and
- // BlockConstPoolScope.
- if (is_const_pool_blocked()) {
- // Something is wrong if emission is forced and blocked at the same time.
- ASSERT(!force_emit);
- return;
- }
-
- // There is nothing to do if there are no pending constant pool entries.
- if (num_pending_reloc_info_ == 0) {
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
- // Calculate the offset of the next check.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
- return;
- }
-
- // Check that the code buffer is large enough before emitting the constant
- // pool (including the jump over the pool, the constant pool marker, and
- // the gap to the relocation information).
- // Note 64-bit values are wider, and the first one needs to be 64-bit aligned.
- int jump_instr = require_jump ? kInstrSize : 0;
- int size_up_to_marker = jump_instr + kInstrSize;
- int size_after_marker = num_pending_reloc_info_ * kPointerSize;
- bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
- // 64-bit values must be 64-bit aligned.
- // We'll start emitting at PC: branch+marker, then padding if needed, then
- // the 64-bit values, and finally the 32-bit values (matching the emission
- // order below).
- bool require_64_bit_align = has_fp_values &&
- (((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3);
- if (require_64_bit_align) {
- size_after_marker += kInstrSize;
- }
- // num_pending_reloc_info_ also counts the 64-bit entries, so the code above
- // has already accounted for half of their size. Add the remaining size.
- STATIC_ASSERT(kPointerSize == kDoubleSize / 2);
- size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2);
-
- int size = size_up_to_marker + size_after_marker;
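-
- // E.g. with a jump required, two pending 32-bit entries and one pending
- // 64-bit entry (and no alignment padding), this gives
- // size = (4 + 4) + (3 * 4 + 1 * 4) = 24 bytes.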
-
- // We emit a constant pool when:
- // * requested to do so by parameter force_emit (e.g. after each function).
- // * the distance from the first instruction accessing the constant pool to
- // any of the constant pool entries will exceed its limit the next
- // time the pool is checked. This is overly restrictive, but we don't emit
- // constant pool entries in-order so it's conservatively correct.
- // * the instruction doesn't require a jump after itself to jump over the
- // constant pool, and we're getting close to running out of range.
- if (!force_emit) {
- ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0));
- int dist = pc_offset() + size - first_const_pool_use_;
- if (has_fp_values) {
- if ((dist < kMaxDistToFPPool - kCheckPoolInterval) &&
- (require_jump || (dist < kMaxDistToFPPool / 2))) {
- return;
- }
- } else {
- if ((dist < kMaxDistToIntPool - kCheckPoolInterval) &&
- (require_jump || (dist < kMaxDistToIntPool / 2))) {
- return;
- }
- }
- }
-
- int needed_space = size + kGap;
- while (buffer_space() <= needed_space) GrowBuffer();
-
- {
- // Block recursive calls to CheckConstPool.
- BlockConstPoolScope block_const_pool(this);
- RecordComment("[ Constant Pool");
- RecordConstPool(size);
-
- // Emit jump over constant pool if necessary.
- Label after_pool;
- if (require_jump) {
- b(&after_pool);
- }
-
- // Put down the constant pool marker, an "Undefined instruction".
- // The data size helps the disassembler know what to print.
- emit(kConstantPoolMarker | EncodeConstantPoolLength(size_after_marker));
-
- if (require_64_bit_align) {
- emit(kConstantPoolMarker);
- }
-
- // Emit 64-bit constant pool entries first: their range is smaller than
- // 32-bit entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
-
- if (rinfo.rmode() != RelocInfo::NONE64) {
- // 32-bit values emitted later.
- continue;
- }
-
- ASSERT(!((uintptr_t)pc_ & 0x3)); // Check 64-bit alignment.
-
- Instr instr = instr_at(rinfo.pc());
- // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
- ASSERT((IsVldrDPcImmediateOffset(instr) &&
- GetVldrDRegisterImmediateOffset(instr) == 0));
-
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
- ASSERT(is_uint10(delta));
-
- instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
-
- const double double_data = rinfo.data64();
- uint64_t uint_data = 0;
- memcpy(&uint_data, &double_data, sizeof(double_data));
- emit(uint_data & 0xFFFFFFFF);
- emit(uint_data >> 32);
- }
-
- // Emit 32-bit constant pool entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
- rinfo.rmode() != RelocInfo::CONST_POOL);
-
- if (rinfo.rmode() == RelocInfo::NONE64) {
- // 64-bit values emitted earlier.
- continue;
- }
-
- Instr instr = instr_at(rinfo.pc());
-
- // 64-bit loads shouldn't get here.
- ASSERT(!IsVldrDPcImmediateOffset(instr));
-
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
- // 0 is the smallest delta:
- // ldr rd, [pc, #0]
- // constant pool marker
- // data
-
- if (IsLdrPcImmediateOffset(instr) &&
- GetLdrRegisterImmediateOffset(instr) == 0) {
- ASSERT(is_uint12(delta));
- instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
- emit(rinfo.data());
- } else {
- ASSERT(IsMovW(instr));
- emit(rinfo.data());
- }
- }
-
- num_pending_reloc_info_ = 0;
- num_pending_64_bit_reloc_info_ = 0;
- first_const_pool_use_ = -1;
-
- RecordComment("]");
-
- if (after_pool.is_linked()) {
- bind(&after_pool);
- }
- }
-
- // Since a constant pool was just emitted, move the check offset forward by
- // the standard interval.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/assembler-arm.h b/src/3rdparty/v8/src/arm/assembler-arm.h
deleted file mode 100644
index 12cee54..0000000
--- a/src/3rdparty/v8/src/arm/assembler-arm.h
+++ /dev/null
@@ -1,1518 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-// A light-weight ARM Assembler
-// Generates user mode instructions for the ARM architecture up to version 5
-
-#ifndef V8_ARM_ASSEMBLER_ARM_H_
-#define V8_ARM_ASSEMBLER_ARM_H_
-#include <stdio.h>
-#include "assembler.h"
-#include "constants-arm.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == VFP3 && !FLAG_enable_vfp3) return false;
- if (f == VFP2 && !FLAG_enable_vfp2) return false;
- if (f == SUDIV && !FLAG_enable_sudiv) return false;
- if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
- return false;
- }
- if (f == VFP32DREGS && !FLAG_enable_32dregs) return false;
- return (supported_ & (1u << f)) != 0;
- }
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
- return (enabled & (1u << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- unsigned mask = 1u << f;
- // VFP2 and ARMv7 are implied by VFP3.
- if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- unsigned old_enabled_;
-#else
-
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (1u << f);
- }
- }
-
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
-
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
-
- const unsigned old_supported_;
- };
-
- private:
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static unsigned supported_;
- static unsigned found_by_runtime_probing_;
-
- friend class ExternalReference;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
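-
-// A minimal usage sketch of the Scope mechanism (illustrative only): probe
-// once, check support, then enable the feature for the duration of a scope.
-//
-//   CpuFeatures::Probe();
-//   if (CpuFeatures::IsSupported(VFP3)) {
-//     CpuFeatures::Scope scope(VFP3);
-//     // VFP3 instructions may be emitted here.
-//   }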
-
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-
-// Core register
-struct Register {
- static const int kNumRegisters = 16;
- static const int kMaxNumAllocatableRegisters = 8;
- static const int kSizeInBytes = 4;
- static const int kGPRsPerNonVFP2Double = 2;
-
- inline static int NumAllocatableRegisters();
-
- static int ToAllocationIndex(Register reg) {
- ASSERT(reg.code() < kMaxNumAllocatableRegisters);
- return reg.code();
- }
-
- static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index);
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "r0",
- "r1",
- "r2",
- "r3",
- "r4",
- "r5",
- "r6",
- "r7",
- };
- return names[index];
- }
-
- static Register from_code(int code) {
- Register r = { code };
- return r;
- }
-
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- void set_code(int code) {
- code_ = code;
- ASSERT(is_valid());
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
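-
-// For the allocatable registers r0-r7 the allocation index is simply the
-// register code, so e.g. (illustrative) Register::ToAllocationIndex(r5) == 5
-// and Register::FromAllocationIndex(5).is(r5).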
-
-// These constants are used in several locations, including static initializers
-const int kRegister_no_reg_Code = -1;
-const int kRegister_r0_Code = 0;
-const int kRegister_r1_Code = 1;
-const int kRegister_r2_Code = 2;
-const int kRegister_r3_Code = 3;
-const int kRegister_r4_Code = 4;
-const int kRegister_r5_Code = 5;
-const int kRegister_r6_Code = 6;
-const int kRegister_r7_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_fp_Code = 11;
-const int kRegister_ip_Code = 12;
-const int kRegister_sp_Code = 13;
-const int kRegister_lr_Code = 14;
-const int kRegister_pc_Code = 15;
-
-const Register no_reg = { kRegister_no_reg_Code };
-
-const Register r0 = { kRegister_r0_Code };
-const Register r1 = { kRegister_r1_Code };
-const Register r2 = { kRegister_r2_Code };
-const Register r3 = { kRegister_r3_Code };
-const Register r4 = { kRegister_r4_Code };
-const Register r5 = { kRegister_r5_Code };
-const Register r6 = { kRegister_r6_Code };
-const Register r7 = { kRegister_r7_Code };
-// Used as context register.
-const Register r8 = { kRegister_r8_Code };
-// Used as lithium codegen scratch register.
-const Register r9 = { kRegister_r9_Code };
-// Used as roots register.
-const Register r10 = { kRegister_r10_Code };
-const Register fp = { kRegister_fp_Code };
-const Register ip = { kRegister_ip_Code };
-const Register sp = { kRegister_sp_Code };
-const Register lr = { kRegister_lr_Code };
-const Register pc = { kRegister_pc_Code };
-
-// Single word VFP register.
-struct SwVfpRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 32; }
- bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
- void split_code(int* vm, int* m) const {
- ASSERT(is_valid());
- *m = code_ & 0x1;
- *vm = code_ >> 1;
- }
-
- int code_;
-};
-
-
-// Double word VFP register.
-struct DwVfpRegister {
- static const int kMaxNumRegisters = 32;
-  // A few double registers are reserved: one as a scratch register and one to
-  // hold 0.0, which does not fit in the immediate field of vmov instructions.
- // d14: 0.0
- // d15: scratch register.
- static const int kNumReservedRegisters = 2;
- static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
- kNumReservedRegisters;
-
- // Note: the number of registers can be different at snapshot and run-time.
-  // Any code included in the snapshot must be able to run with either 16 or
-  // 32 registers.
- inline static int NumRegisters();
- inline static int NumAllocatableRegisters();
-
- inline static int ToAllocationIndex(DwVfpRegister reg);
- static const char* AllocationIndexToString(int index);
- inline static DwVfpRegister FromAllocationIndex(int index);
-
- static DwVfpRegister from_code(int code) {
- DwVfpRegister r = { code };
- return r;
- }
-
- bool is_valid() const {
- return 0 <= code_ && code_ < kMaxNumRegisters;
- }
- bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
- SwVfpRegister low() const {
- ASSERT(code_ < 16);
- SwVfpRegister reg;
- reg.code_ = code_ * 2;
-
- ASSERT(reg.is_valid());
- return reg;
- }
- SwVfpRegister high() const {
- ASSERT(code_ < 16);
- SwVfpRegister reg;
- reg.code_ = (code_ * 2) + 1;
-
- ASSERT(reg.is_valid());
- return reg;
- }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
- void split_code(int* vm, int* m) const {
- ASSERT(is_valid());
- *m = (code_ & 0x10) >> 4;
- *vm = code_ & 0x0F;
- }
-
- int code_;
-};
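-
-// For the overlapping register bank (d0-d15), low() and high() expose the
-// aliased single-precision halves; e.g. (illustrative) d1.low() is s2 and
-// d1.high() is s3.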
-
-
-typedef DwVfpRegister DoubleRegister;
-
-
-// Support for the VFP registers s0 to s31 (d0 to d15).
-// Note that "s(N):s(N+1)" is the same as "d(N/2)".
-const SwVfpRegister s0 = { 0 };
-const SwVfpRegister s1 = { 1 };
-const SwVfpRegister s2 = { 2 };
-const SwVfpRegister s3 = { 3 };
-const SwVfpRegister s4 = { 4 };
-const SwVfpRegister s5 = { 5 };
-const SwVfpRegister s6 = { 6 };
-const SwVfpRegister s7 = { 7 };
-const SwVfpRegister s8 = { 8 };
-const SwVfpRegister s9 = { 9 };
-const SwVfpRegister s10 = { 10 };
-const SwVfpRegister s11 = { 11 };
-const SwVfpRegister s12 = { 12 };
-const SwVfpRegister s13 = { 13 };
-const SwVfpRegister s14 = { 14 };
-const SwVfpRegister s15 = { 15 };
-const SwVfpRegister s16 = { 16 };
-const SwVfpRegister s17 = { 17 };
-const SwVfpRegister s18 = { 18 };
-const SwVfpRegister s19 = { 19 };
-const SwVfpRegister s20 = { 20 };
-const SwVfpRegister s21 = { 21 };
-const SwVfpRegister s22 = { 22 };
-const SwVfpRegister s23 = { 23 };
-const SwVfpRegister s24 = { 24 };
-const SwVfpRegister s25 = { 25 };
-const SwVfpRegister s26 = { 26 };
-const SwVfpRegister s27 = { 27 };
-const SwVfpRegister s28 = { 28 };
-const SwVfpRegister s29 = { 29 };
-const SwVfpRegister s30 = { 30 };
-const SwVfpRegister s31 = { 31 };
-
-const DwVfpRegister no_dreg = { -1 };
-const DwVfpRegister d0 = { 0 };
-const DwVfpRegister d1 = { 1 };
-const DwVfpRegister d2 = { 2 };
-const DwVfpRegister d3 = { 3 };
-const DwVfpRegister d4 = { 4 };
-const DwVfpRegister d5 = { 5 };
-const DwVfpRegister d6 = { 6 };
-const DwVfpRegister d7 = { 7 };
-const DwVfpRegister d8 = { 8 };
-const DwVfpRegister d9 = { 9 };
-const DwVfpRegister d10 = { 10 };
-const DwVfpRegister d11 = { 11 };
-const DwVfpRegister d12 = { 12 };
-const DwVfpRegister d13 = { 13 };
-const DwVfpRegister d14 = { 14 };
-const DwVfpRegister d15 = { 15 };
-const DwVfpRegister d16 = { 16 };
-const DwVfpRegister d17 = { 17 };
-const DwVfpRegister d18 = { 18 };
-const DwVfpRegister d19 = { 19 };
-const DwVfpRegister d20 = { 20 };
-const DwVfpRegister d21 = { 21 };
-const DwVfpRegister d22 = { 22 };
-const DwVfpRegister d23 = { 23 };
-const DwVfpRegister d24 = { 24 };
-const DwVfpRegister d25 = { 25 };
-const DwVfpRegister d26 = { 26 };
-const DwVfpRegister d27 = { 27 };
-const DwVfpRegister d28 = { 28 };
-const DwVfpRegister d29 = { 29 };
-const DwVfpRegister d30 = { 30 };
-const DwVfpRegister d31 = { 31 };
-
-const Register sfpd_lo = { kRegister_r6_Code };
-const Register sfpd_hi = { kRegister_r7_Code };
-
-// Aliases for double registers. Defined using #define instead of
-// "static const DwVfpRegister&" because Clang complains otherwise when a
-// compilation unit that includes this header doesn't use the variables.
-#define kFirstCalleeSavedDoubleReg d8
-#define kLastCalleeSavedDoubleReg d15
-#define kDoubleRegZero d14
-#define kScratchDoubleReg d15
-
-
-// Coprocessor register
-struct CRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
- bool is(CRegister creg) const { return code_ == creg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-
-const CRegister no_creg = { -1 };
-
-const CRegister cr0 = { 0 };
-const CRegister cr1 = { 1 };
-const CRegister cr2 = { 2 };
-const CRegister cr3 = { 3 };
-const CRegister cr4 = { 4 };
-const CRegister cr5 = { 5 };
-const CRegister cr6 = { 6 };
-const CRegister cr7 = { 7 };
-const CRegister cr8 = { 8 };
-const CRegister cr9 = { 9 };
-const CRegister cr10 = { 10 };
-const CRegister cr11 = { 11 };
-const CRegister cr12 = { 12 };
-const CRegister cr13 = { 13 };
-const CRegister cr14 = { 14 };
-const CRegister cr15 = { 15 };
-
-
-// Coprocessor number
-enum Coprocessor {
- p0 = 0,
- p1 = 1,
- p2 = 2,
- p3 = 3,
- p4 = 4,
- p5 = 5,
- p6 = 6,
- p7 = 7,
- p8 = 8,
- p9 = 9,
- p10 = 10,
- p11 = 11,
- p12 = 12,
- p13 = 13,
- p14 = 14,
- p15 = 15
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-// Class Operand represents a shifter operand in data processing instructions
-class Operand BASE_EMBEDDED {
- public:
- // immediate
- INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE32));
- INLINE(static Operand Zero()) {
- return Operand(static_cast<int32_t>(0));
- }
- INLINE(explicit Operand(const ExternalReference& f));
- explicit Operand(Handle<Object> handle);
- INLINE(explicit Operand(Smi* value));
-
- // rm
- INLINE(explicit Operand(Register rm));
-
- // rm <shift_op> shift_imm
- explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
-
- // rm <shift_op> rs
- explicit Operand(Register rm, ShiftOp shift_op, Register rs);
-
- // Return true if this is a register operand.
- INLINE(bool is_reg() const);
-
-  // Return true if this operand fits in one instruction so that no
-  // 2-instruction solution with a load into the ip register is necessary.
-  // If this operand is used in a MOV or MVN instruction, the actual
-  // instruction must be passed in; for other instructions, instr is ignored.
- bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
- bool must_output_reloc_info(const Assembler* assembler) const;
-
- inline int32_t immediate() const {
- ASSERT(!rm_.is_valid());
- return imm32_;
- }
-
- Register rm() const { return rm_; }
- Register rs() const { return rs_; }
- ShiftOp shift_op() const { return shift_op_; }
-
- private:
- Register rm_;
- Register rs_;
- ShiftOp shift_op_;
- int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
- int32_t imm32_; // valid if rm_ == no_reg
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
-};
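-
-// Illustrative shifter operands, one per constructor form (assuming the
-// usual ShiftOp constants from constants-arm.h):
-//
-//   Operand(1234); // immediate
-//   Operand(r1); // register
-//   Operand(r1, LSL, 2); // r1 logically shifted left by 2
-//   Operand(r1, ASR, r2); // r1 arithmetically shifted right by r2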
-
-
-// Class MemOperand represents a memory operand in load and store instructions
-class MemOperand BASE_EMBEDDED {
- public:
- // [rn +/- offset] Offset/NegOffset
- // [rn +/- offset]! PreIndex/NegPreIndex
- // [rn], +/- offset PostIndex/NegPostIndex
-  // offset is any signed 32-bit value; the offset is first loaded into the ip
-  // register if it does not fit the addressing mode (12-bit unsigned plus a
-  // sign bit)
- explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
-
- // [rn +/- rm] Offset/NegOffset
- // [rn +/- rm]! PreIndex/NegPreIndex
- // [rn], +/- rm PostIndex/NegPostIndex
- explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
-
- // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
- // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
- // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
- explicit MemOperand(Register rn, Register rm,
- ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
-
- void set_offset(int32_t offset) {
- ASSERT(rm_.is(no_reg));
- offset_ = offset;
- }
-
- uint32_t offset() const {
- ASSERT(rm_.is(no_reg));
- return offset_;
- }
-
- Register rn() const { return rn_; }
- Register rm() const { return rm_; }
- AddrMode am() const { return am_; }
-
- bool OffsetIsUint12Encodable() const {
- return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
- }
-
- private:
- Register rn_; // base
- Register rm_; // register offset
- int32_t offset_; // valid if rm_ == no_reg
- ShiftOp shift_op_;
- int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
- AddrMode am_; // bits P, U, and W
-
- friend class Assembler;
-};
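-
-// Illustrative memory operands for each addressing flavor:
-//
-//   MemOperand(r0, 4); // [r0 + 4], plain offset
-//   MemOperand(sp, 4, NegPreIndex); // [sp, #-4]!, as used by push()
-//   MemOperand(sp, 4, PostIndex); // [sp], #4, as used by pop()
-//   MemOperand(r0, r1, LSL, 2); // [r0 + (r1 << 2)]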
-
-extern const Instr kMovLrPc;
-extern const Instr kLdrPCMask;
-extern const Instr kLdrPCPattern;
-extern const Instr kBlxRegMask;
-extern const Instr kBlxRegPattern;
-extern const Instr kBlxIp;
-
-extern const Instr kMovMvnMask;
-extern const Instr kMovMvnPattern;
-extern const Instr kMovMvnFlip;
-
-extern const Instr kMovLeaveCCMask;
-extern const Instr kMovLeaveCCPattern;
-extern const Instr kMovwMask;
-extern const Instr kMovwPattern;
-extern const Instr kMovwLeaveCCFlip;
-
-extern const Instr kCmpCmnMask;
-extern const Instr kCmpCmnPattern;
-extern const Instr kCmpCmnFlip;
-extern const Instr kAddSubFlip;
-extern const Instr kAndBicFlip;
-
-struct VmovIndex {
- unsigned char index;
-};
-const VmovIndex VmovIndexLo = { 0 };
-const VmovIndex VmovIndexHi = { 1 };
-
-class Assembler : public AssemblerBase {
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~Assembler();
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Label operations & relative jumps (PPUM Appendix D)
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void bind(Label* L); // binds an unbound label L to the current code position
-
-  // Returns the branch offset to the given label from the current code
-  // position. Links the label to the current position if it is still unbound.
-  // Manages the jump elimination optimization if the second parameter is true.
- int branch_offset(Label* L, bool jump_elimination_allowed);
-
-  // Puts a label's target address at the given position.
- // The high 8 bits are set to zero.
- void label_at_put(Label* L, int at_offset);
-
- // Return the address in the constant pool of the code target address used by
- // the branch/call instruction at pc, or the object in a mov.
- INLINE(static Address target_pointer_address_at(Address pc));
-
- // Read/Modify the pointer in the branch/call/move instruction at pc.
- INLINE(static Address target_pointer_at(Address pc));
- INLINE(static void set_target_pointer_at(Address pc, Address target));
-
- // Read/Modify the code target address in the branch/call instruction at pc.
- INLINE(static Address target_address_at(Address pc));
- INLINE(static void set_target_address_at(Address pc, Address target));
-
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- INLINE(static Address target_address_from_return_address(Address pc));
-
- // Given the address of the beginning of a call, return the address
- // in the instruction stream that the call will return from.
- INLINE(static Address return_address_from_call_start(Address pc));
-
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches within generated code.
- inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Address target);
-
- // This sets the branch destination (which is in the constant pool on ARM).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address constant_pool_entry,
- Address target);
-
- // Here we are patching the address in the constant pool, not the actual call
- // instruction. The address in the constant pool is the same size as a
- // pointer.
- static const int kSpecialTargetSize = kPointerSize;
-
- // Size of an instruction.
- static const int kInstrSize = sizeof(Instr);
-
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
-#ifdef USE_BLX
- // Patched return sequence is:
-  //  ldr ip, [pc, #0] @ emitted address and start
- // blx ip
- static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
-#else
- // Patched return sequence is:
- // mov lr, pc @ start of sequence
-  //  ldr pc, [pc, #-4] @ emitted address
- static const int kPatchReturnSequenceAddressOffset = kInstrSize;
-#endif
-
- // Distance between start of patched debug break slot and the emitted address
- // to jump to.
-#ifdef USE_BLX
- // Patched debug break slot code is:
-  //  ldr ip, [pc, #0] @ emitted address and start
- // blx ip
- static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
-#else
- // Patched debug break slot code is:
- // mov lr, pc @ start of sequence
-  //  ldr pc, [pc, #-4] @ emitted address
- static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
-#endif
-
-#ifdef USE_BLX
- static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
-#else
- static const int kPatchDebugBreakSlotReturnOffset = kInstrSize;
-#endif
-
-  // Difference between the address of the current opcode and the value read
-  // from the pc register.
- static const int kPcLoadDelta = 8;
-
- static const int kJSReturnSequenceInstructions = 4;
- static const int kDebugBreakSlotInstructions = 3;
- static const int kDebugBreakSlotLength =
- kDebugBreakSlotInstructions * kInstrSize;
-
- // ---------------------------------------------------------------------------
- // Code generation
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m. m must be a power of 2 (>= 4).
- void Align(int m);
-  // Aligns code to a boundary that is optimal for a jump target on this
-  // platform.
- void CodeTargetAlign();
-
- // Branch instructions
- void b(int branch_offset, Condition cond = al);
- void bl(int branch_offset, Condition cond = al);
- void blx(int branch_offset); // v5 and above
- void blx(Register target, Condition cond = al); // v5 and above
- void bx(Register target, Condition cond = al); // v5 and above, plus v4t
-
- // Convenience branch instructions using labels
- void b(Label* L, Condition cond = al) {
- b(branch_offset(L, cond == al), cond);
- }
- void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); }
- void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); }
- void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); }
- void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
-
- // Data-processing instructions
-
- void and_(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void eor(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void sub(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void sub(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- sub(dst, src1, Operand(src2), s, cond);
- }
-
- void rsb(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void add(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void add(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- add(dst, src1, Operand(src2), s, cond);
- }
-
- void adc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void sbc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void rsc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void tst(Register src1, const Operand& src2, Condition cond = al);
- void tst(Register src1, Register src2, Condition cond = al) {
- tst(src1, Operand(src2), cond);
- }
-
- void teq(Register src1, const Operand& src2, Condition cond = al);
-
- void cmp(Register src1, const Operand& src2, Condition cond = al);
- void cmp(Register src1, Register src2, Condition cond = al) {
- cmp(src1, Operand(src2), cond);
- }
- void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
-
- void cmn(Register src1, const Operand& src2, Condition cond = al);
-
- void orr(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void orr(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al) {
- orr(dst, src1, Operand(src2), s, cond);
- }
-
- void mov(Register dst, const Operand& src,
- SBit s = LeaveCC, Condition cond = al);
- void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
- mov(dst, Operand(src), s, cond);
- }
-
-  // ARMv7 instructions for loading a 32-bit immediate in two instructions.
-  // This may actually emit a different mov instruction, but on ARMv7 it is
-  // guaranteed to emit only one instruction.
- void movw(Register reg, uint32_t immediate, Condition cond = al);
- // The constant for movt should be in the range 0-0xffff.
- void movt(Register reg, uint32_t immediate, Condition cond = al);
-
- void bic(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void mvn(Register dst, const Operand& src,
- SBit s = LeaveCC, Condition cond = al);
-
- // Multiply instructions
-
- void mla(Register dst, Register src1, Register src2, Register srcA,
- SBit s = LeaveCC, Condition cond = al);
-
- void mls(Register dst, Register src1, Register src2, Register srcA,
- Condition cond = al);
-
- void sdiv(Register dst, Register src1, Register src2,
- Condition cond = al);
-
- void mul(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void smlal(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void smull(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void umlal(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- void umull(Register dstL, Register dstH, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
-
- // Miscellaneous arithmetic instructions
-
- void clz(Register dst, Register src, Condition cond = al); // v5 and above
-
- // Saturating instructions. v6 and above.
-
- // Unsigned saturate.
- //
- // Saturate an optionally shifted signed value to an unsigned range.
- //
- // usat dst, #satpos, src
- // usat dst, #satpos, src, lsl #sh
- // usat dst, #satpos, src, asr #sh
- //
- // Register dst will contain:
- //
- // 0, if s < 0
- // (1 << satpos) - 1, if s > ((1 << satpos) - 1)
- // s, otherwise
- //
- // where s is the contents of src after shifting (if used.)
- void usat(Register dst, int satpos, const Operand& src, Condition cond = al);
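-  // For example (illustrative): usat(r0, 8, Operand(r1)) clamps the signed
-  // value in r1 to [0, (1 << 8) - 1], i.e. [0, 255], and writes it to r0.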
-
- // Bitfield manipulation instructions. v7 and above.
-
- void ubfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
-
- void sbfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
-
- void bfc(Register dst, int lsb, int width, Condition cond = al);
-
- void bfi(Register dst, Register src, int lsb, int width,
- Condition cond = al);
-
- // Status register access instructions
-
- void mrs(Register dst, SRegister s, Condition cond = al);
- void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
-
- // Load/Store instructions
- void ldr(Register dst, const MemOperand& src, Condition cond = al);
- void str(Register src, const MemOperand& dst, Condition cond = al);
- void ldrb(Register dst, const MemOperand& src, Condition cond = al);
- void strb(Register src, const MemOperand& dst, Condition cond = al);
- void ldrh(Register dst, const MemOperand& src, Condition cond = al);
- void strh(Register src, const MemOperand& dst, Condition cond = al);
- void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
- void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
- void ldrd(Register dst1,
- Register dst2,
- const MemOperand& src, Condition cond = al);
- void strd(Register src1,
- Register src2,
- const MemOperand& dst, Condition cond = al);
-
- // Load/Store multiple instructions
- void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
- void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
-
- // Exception-generating instructions and debugging support
- void stop(const char* msg,
- Condition cond = al,
- int32_t code = kDefaultStopCode);
-
- void bkpt(uint32_t imm16); // v5 and above
- void svc(uint32_t imm24, Condition cond = al);
-
- // Coprocessor instructions
-
- void cdp(Coprocessor coproc, int opcode_1,
- CRegister crd, CRegister crn, CRegister crm,
- int opcode_2, Condition cond = al);
-
- void cdp2(Coprocessor coproc, int opcode_1,
- CRegister crd, CRegister crn, CRegister crm,
- int opcode_2); // v5 and above
-
- void mcr(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0, Condition cond = al);
-
- void mcr2(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0); // v5 and above
-
- void mrc(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0, Condition cond = al);
-
- void mrc2(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0); // v5 and above
-
- void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
- LFlag l = Short, Condition cond = al);
- void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short, Condition cond = al);
-
- void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
- LFlag l = Short); // v5 and above
- void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short); // v5 and above
-
- // Support for VFP.
- // All these APIs support S0 to S31 and D0 to D31.
-
- void vldr(const DwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond = al);
- void vldr(const DwVfpRegister dst,
- const MemOperand& src,
- const Condition cond = al);
-
- void vldr(const SwVfpRegister dst,
- const Register base,
- int offset,
- const Condition cond = al);
- void vldr(const SwVfpRegister dst,
- const MemOperand& src,
- const Condition cond = al);
-
- void vstr(const DwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond = al);
- void vstr(const DwVfpRegister src,
- const MemOperand& dst,
- const Condition cond = al);
-
- void vstr(const SwVfpRegister src,
- const Register base,
- int offset,
- const Condition cond = al);
- void vstr(const SwVfpRegister src,
- const MemOperand& dst,
- const Condition cond = al);
-
- void vldm(BlockAddrMode am,
- Register base,
- DwVfpRegister first,
- DwVfpRegister last,
- Condition cond = al);
-
- void vstm(BlockAddrMode am,
- Register base,
- DwVfpRegister first,
- DwVfpRegister last,
- Condition cond = al);
-
- void vldm(BlockAddrMode am,
- Register base,
- SwVfpRegister first,
- SwVfpRegister last,
- Condition cond = al);
-
- void vstm(BlockAddrMode am,
- Register base,
- SwVfpRegister first,
- SwVfpRegister last,
- Condition cond = al);
-
- void vmov(const DwVfpRegister dst,
- double imm,
- const Register scratch = no_reg);
- void vmov(const SwVfpRegister dst,
- const SwVfpRegister src,
- const Condition cond = al);
- void vmov(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
- void vmov(const DwVfpRegister dst,
- const VmovIndex index,
- const Register src,
- const Condition cond = al);
- void vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond = al);
- void vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond = al);
- void vmov(const SwVfpRegister dst,
- const Register src,
- const Condition cond = al);
- void vmov(const Register dst,
- const SwVfpRegister src,
- const Condition cond = al);
- void vcvt_f64_s32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_f32_s32(const SwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_f64_u32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_s32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_u32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_f64_f32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
- void vcvt_f32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode = kDefaultRoundToZero,
- const Condition cond = al);
-
- void vneg(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
- void vabs(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
- void vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vmla(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vmls(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void vcmp(const DwVfpRegister src1,
- const double src2,
- const Condition cond = al);
- void vmrs(const Register dst,
- const Condition cond = al);
- void vmsr(const Register dst,
- const Condition cond = al);
- void vsqrt(const DwVfpRegister dst,
- const DwVfpRegister src,
- const Condition cond = al);
-
- // Pseudo instructions
-
- // Different nop operations are used by the code generator to detect certain
- // states of the generated code.
- enum NopMarkerTypes {
- NON_MARKING_NOP = 0,
- DEBUG_BREAK_NOP,
- // IC markers.
- PROPERTY_ACCESS_INLINED,
- PROPERTY_ACCESS_INLINED_CONTEXT,
- PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
- // Helper values.
- LAST_CODE_MARKER,
- FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
- };
-
- void nop(int type = 0); // 0 is the default non-marking type.
-
- void push(Register src, Condition cond = al) {
- str(src, MemOperand(sp, 4, NegPreIndex), cond);
- }
-
- void pop(Register dst, Condition cond = al) {
- ldr(dst, MemOperand(sp, 4, PostIndex), cond);
- }
-
- void pop() {
- add(sp, sp, Operand(kPointerSize));
- }
-
-  // Jump unconditionally to the given label.
- void jmp(Label* L) { b(L, al); }
-
- static bool use_immediate_embedded_pointer_loads(
- const Assembler* assembler) {
-#ifdef USE_BLX
- return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- (assembler == NULL || !assembler->predictable_code_size());
-#else
-    // If not using BLX, no load from the constant pool can be immediate,
-    // because the ldr pc, [pc + #xxxx] used for calls must be a single
-    // instruction and cannot easily be distinguished out of context from
-    // other loads that could use movw/movt.
- return false;
-#endif
- }
-
- // Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* label) {
- return pc_offset() - label->pos();
- }
-
- // Check the number of instructions generated from label to here.
- int InstructionsGeneratedSince(Label* label) {
- return SizeOfCodeGeneratedSince(label) / kInstrSize;
- }
-
- // Check whether an immediate fits an addressing mode 1 instruction.
- bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
-
- // Class for scoping postponing the constant pool generation.
- class BlockConstPoolScope {
- public:
- explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
- assem_->StartBlockConstPool();
- }
- ~BlockConstPoolScope() {
- assem_->EndBlockConstPool();
- }
-
- private:
- Assembler* assem_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
- };
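-
-  // A usage sketch (illustrative): block the pool while emitting a sequence
-  // whose length must not change.
-  //
-  //   { BlockConstPoolScope block_const_pool(this);
-  //     // Emit instructions that must stay contiguous.
-  //   }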
-
- // Debugging
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot();
-
- // Record the AST id of the CallIC being compiled, so that it can be placed
- // in the relocation information.
- void SetRecordedAstId(TypeFeedbackId ast_id) {
- ASSERT(recorded_ast_id_.IsNone());
- recorded_ast_id_ = ast_id;
- }
-
- TypeFeedbackId RecordedAstId() {
- ASSERT(!recorded_ast_id_.IsNone());
- return recorded_ast_id_;
- }
-
- void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
-
- // Record the emission of a constant pool.
- //
-  // The emission of the constant pool depends on the size of the generated
-  // code and the number of RelocInfo entries recorded.
- // The Debug mechanism needs to map code offsets between two versions of a
- // function, compiled with and without debugger support (see for example
- // Debug::PrepareForBreakPoints()).
- // Compiling functions with debugger support generates additional code
- // (Debug::GenerateSlot()). This may affect the emission of the constant
- // pools and cause the version of the code with debugger support to have
- // constant pools generated in different places.
-  // Recording the position and size of emitted constant pools allows the
-  // offset mappings between the different versions of a function to be
-  // computed correctly in all situations.
- //
- // The parameter indicates the size of the constant pool (in bytes), including
- // the marker and branch over the data.
- void RecordConstPool(int size);
-
- // Writes a single byte or word of data in the code stream. Used
- // for inline tables, e.g., jump-tables. The constant pool should be
- // emitted before any use of db and dd to ensure that constant pools
- // are not emitted as part of the tables generated.
- void db(uint8_t data);
- void dd(uint32_t data);
-
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
-
- // Read/patch instructions
- Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
- void instr_at_put(int pos, Instr instr) {
- *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
- }
- static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
- static void instr_at_put(byte* pc, Instr instr) {
- *reinterpret_cast<Instr*>(pc) = instr;
- }
- static Condition GetCondition(Instr instr);
- static bool IsBranch(Instr instr);
- static int GetBranchOffset(Instr instr);
- static bool IsLdrRegisterImmediate(Instr instr);
- static bool IsVldrDRegisterImmediate(Instr instr);
- static int GetLdrRegisterImmediateOffset(Instr instr);
- static int GetVldrDRegisterImmediateOffset(Instr instr);
- static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
- static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset);
- static bool IsStrRegisterImmediate(Instr instr);
- static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
- static bool IsAddRegisterImmediate(Instr instr);
- static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
- static Register GetRd(Instr instr);
- static Register GetRn(Instr instr);
- static Register GetRm(Instr instr);
- static bool IsPush(Instr instr);
- static bool IsPop(Instr instr);
- static bool IsStrRegFpOffset(Instr instr);
- static bool IsLdrRegFpOffset(Instr instr);
- static bool IsStrRegFpNegOffset(Instr instr);
- static bool IsLdrRegFpNegOffset(Instr instr);
- static bool IsLdrPcImmediateOffset(Instr instr);
- static bool IsVldrDPcImmediateOffset(Instr instr);
- static bool IsTstImmediate(Instr instr);
- static bool IsCmpRegister(Instr instr);
- static bool IsCmpImmediate(Instr instr);
- static Register GetCmpImmediateRegister(Instr instr);
- static int GetCmpImmediateRawImmediate(Instr instr);
- static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
- static bool IsMovT(Instr instr);
- static bool IsMovW(Instr instr);
-
-  // Constants in pools are accessed via PC-relative addressing, which can
- // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
- // PC-relative loads, thereby defining a maximum distance between the
- // instruction and the accessed constant.
- static const int kMaxDistToIntPool = 4*KB;
- static const int kMaxDistToFPPool = 1*KB;
-  // Since all pending relocations could be integer ones, the integer pool
-  // distance acts as the limit.
- static const int kMaxNumPendingRelocInfo = kMaxDistToIntPool/kInstrSize;
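-  // With kInstrSize == 4 this works out to 4 KB / 4 = 1024 pending entries
-  // (illustrative arithmetic).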
-
- // Postpone the generation of the constant pool for the specified number of
- // instructions.
- void BlockConstPoolFor(int instructions);
-
-  // Check whether it is time to emit a constant pool.
- void CheckConstPool(bool force_emit, bool require_jump);
-
- protected:
- // Relocation for a type-recording IC has the AST id added to it. This
- // member variable is a way to pass the information from the call site to
- // the relocation info.
- TypeFeedbackId recorded_ast_id_;
-
- int buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
-  // Decode the branch instruction at pos and return the branch target pos.
- int target_at(int pos);
-
-  // Patch the branch instruction at pos to branch to the given target pos.
- void target_at_put(int pos, int target_pos);
-
-  // Prevent constant pool emission until EndBlockConstPool is called.
-  // Calls to this function can be nested but must be followed by an equal
-  // number of calls to EndBlockConstPool.
- void StartBlockConstPool() {
- if (const_pool_blocked_nesting_++ == 0) {
- // Prevent constant pool checks happening by setting the next check to
- // the biggest possible offset.
- next_buffer_check_ = kMaxInt;
- }
- }
-
-  // Resume constant pool emission. Needs to be called as many times as
-  // StartBlockConstPool to have an effect.
- void EndBlockConstPool() {
- if (--const_pool_blocked_nesting_ == 0) {
- // Check the constant pool hasn't been blocked for too long.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToIntPool)));
- ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToFPPool)));
- // Two cases:
- // * no_const_pool_before_ >= next_buffer_check_ and the emission is
- // still blocked
- // * no_const_pool_before_ < next_buffer_check_ and the next emit will
- // trigger a check.
- next_buffer_check_ = no_const_pool_before_;
- }
- }
-
- bool is_const_pool_blocked() const {
- return (const_pool_blocked_nesting_ > 0) ||
- (pc_offset() < no_const_pool_before_);
- }
-
- private:
- int next_buffer_check_; // pc offset of next buffer check
-
- // Code generation
- // The relocation writer's position is at least kGap bytes below the end of
- // the generated instructions. This is so that multi-instruction sequences do
- // not have to check for overflow. The same is true for writes of large
- // relocation info entries.
- static const int kGap = 32;
-
- // Constant pool generation
- // Pools are emitted in the instruction stream, preferably after unconditional
- // jumps or after returns from functions (in dead code locations).
- // If a long code sequence does not contain unconditional jumps, it is
- // necessary to emit the constant pool before the pool gets too far from the
- // location it is accessed from. In this case, we emit a jump over the emitted
- // constant pool.
-  // Constants in the pool may be addresses of functions that get relocated;
-  // if so, a relocation info entry is associated with the constant pool entry.
-
- // Repeated checking whether the constant pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated. That also means that the sizing of the buffers is not
- // an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckPoolIntervalInst = 32;
- static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
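-  // That is, a check roughly every 32 * 4 = 128 bytes of emitted code
-  // (illustrative arithmetic).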
-
-
- // Emission of the constant pool may be blocked in some code sequences.
- int const_pool_blocked_nesting_; // Block emission if this is not zero.
- int no_const_pool_before_; // Block emission before this pc offset.
-
- // Keep track of the first instruction requiring a constant pool entry
- // since the previous constant pool was emitted.
- int first_const_pool_use_;
-
- // Relocation info generation
- // Each relocation is encoded as a variable size value
- static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
- RelocInfoWriter reloc_info_writer;
-
- // Relocation info records are also used during code generation as temporary
- // containers for constants and code target addresses until they are emitted
- // to the constant pool. These pending relocation info records are temporarily
- // stored in a separate buffer until a constant pool is emitted.
- // If every instruction in a long sequence is accessing the pool, we need one
- // pending relocation entry per instruction.
-
- // the buffer of pending relocation info
- RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
- // number of pending reloc info entries in the buffer
- int num_pending_reloc_info_;
- // Number of pending reloc info entries included above which also happen to
- // be 64-bit.
- int num_pending_64_bit_reloc_info_;
-
-  // The bound position; before this we cannot do instruction elimination.
- int last_bound_pos_;
-
- // Code emission
- inline void CheckBuffer();
- void GrowBuffer();
- inline void emit(Instr x);
-
- // 32-bit immediate values
- void move_32_bit_immediate(Condition cond,
- Register rd,
- SBit s,
- const Operand& x);
-
- // Instruction generation
- void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
- void addrmod2(Instr instr, Register rd, const MemOperand& x);
- void addrmod3(Instr instr, Register rd, const MemOperand& x);
- void addrmod4(Instr instr, Register rn, RegList rl);
- void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
-
- // Labels
- void print(Label* L);
- void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
- void next(Label* L);
-
- enum UseConstantPoolMode {
- USE_CONSTANT_POOL,
- DONT_USE_CONSTANT_POOL
- };
-
- // Record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
- UseConstantPoolMode mode = USE_CONSTANT_POOL);
- void RecordRelocInfo(double data);
- void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
-
- friend class RegExpMacroAssemblerARM;
- friend class RelocInfo;
- friend class CodePatcher;
- friend class BlockConstPoolScope;
-
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
- friend class EnsureSpace;
-};
-
-
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) {
- assembler->CheckBuffer();
- }
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_ASSEMBLER_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/builtins-arm.cc b/src/3rdparty/v8/src/arm/builtins-arm.cc
deleted file mode 100644
index 466c890..0000000
--- a/src/3rdparty/v8/src/arm/builtins-arm.cc
+++ /dev/null
@@ -1,1901 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen.h"
-#include "debug.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments excluding receiver
- // -- r1 : called function (only guaranteed when
- // extra_args requires it)
- // -- cp : context
- // -- sp[0] : last argument
- // -- ...
- // -- sp[4 * (argc - 1)] : first argument (argc == r0)
- // -- sp[4 * argc] : receiver
- // -----------------------------------
-
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(r1);
- } else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- }
-
- // JumpToExternalReference expects r0 to contain the number of arguments
- // including the receiver and the extra arguments.
- __ add(r0, r0, Operand(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
-
-
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the native context.
-
- __ ldr(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- // Load the InternalArray function from the native context.
- __ ldr(result,
- MemOperand(result,
- Context::SlotOffset(
- Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
-}
-
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
-
- __ ldr(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(result,
- FieldMemOperand(result, GlobalObject::kNativeContextOffset));
- // Load the Array function from the native context.
- __ ldr(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. An elements backing store is allocated with size initial_capacity
-// and filled with the hole values.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
-  // The JSArray has been allocated. Now initialize the fields except for the
-  // elements array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(scratch3, Operand::Zero());
- __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
-
- if (initial_capacity == 0) {
- __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ add(scratch1, result, Operand(JSArray::kSize));
- __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array (untagged)
- // scratch2: start of next object
- __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
- STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
- STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- static const int kLoopUnfoldLimit = 4;
- if (initial_capacity <= kLoopUnfoldLimit) {
- for (int i = 0; i < initial_capacity; i++) {
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- }
- } else {
- Label loop, entry;
- __ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
- __ b(&entry);
- __ bind(&loop);
- __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(scratch1, scratch2);
- __ b(lt, &loop);
- }
-}
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register, and the beginning and end of the FixedArray
-// elements storage are put into registers elements_array_storage and
-// elements_array_end (see below for when that is not the case). If the
-// parameter fill_with_hole is true, the allocated elements backing store is
-// filled with the hole values; otherwise it is left uninitialized. When the
-// backing store is filled, the register elements_array_storage is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array_storage,
- Register elements_array_end,
- Register scratch1,
- Register scratch2,
- bool fill_with_hole,
- Label* gc_required) {
- // Load the initial map from the array function.
- __ LoadInitialArrayMap(array_function, scratch2,
- elements_array_storage, fill_with_hole);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ tst(array_size, array_size);
- __ Assert(ne, "array size is unexpectedly 0");
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested number of elements.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
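-  // The size is computed in words: the fixed header words, plus one word per
-  // element (array_size is a smi, so shifting right by kSmiTagSize untags it).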
- __ mov(elements_array_end,
- Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
- __ add(elements_array_end,
- elements_array_end,
- Operand(array_size, ASR, kSmiTagSize));
- __ AllocateInNewSpace(
- elements_array_end,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array_storage: initial map
- // array_size: size of array (smi)
- __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
- __ str(elements_array_storage,
- FieldMemOperand(result, JSArray::kPropertiesOffset));
- // Field JSArray::kElementsOffset is initialized later.
- __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset));
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // array_size: size of array (smi)
- __ add(elements_array_storage, result, Operand(JSArray::kSize));
- __ str(elements_array_storage,
- FieldMemOperand(result, JSArray::kElementsOffset));
-
- // Clear the heap tag on the elements array.
- STATIC_ASSERT(kSmiTag == 0);
- __ sub(elements_array_storage,
- elements_array_storage,
- Operand(kHeapObjectTag));
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // elements_array_storage: elements array (untagged)
- // array_size: size of array (smi)
- __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
- __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ str(array_size,
- MemOperand(elements_array_storage, kPointerSize, PostIndex));
-
- // Calculate elements array and elements array end.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // array_size: smi-tagged size of elements array
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(elements_array_end,
- elements_array_storage,
- Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array_storage: elements array element storage
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
- __ jmp(&entry);
- __ bind(&loop);
- __ str(scratch1,
- MemOperand(elements_array_storage, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(elements_array_storage, elements_array_end);
- __ b(lt, &loop);
- }
-}
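-
-// For reference, AllocateJSArray produces one contiguous allocation; a
-// schematic view (not authoritative, but consistent with the offsets and
-// asserts used above):
-//
-//   result -> [ JSArray: map | properties | elements | length ]
-//             [ FixedArray: map | length (smi) | elem 0 | ... | elem N-1 ]
-//
-// with the JSArray's elements field pointing at the FixedArray header.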
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code, the runtime is called.
-// This function assumes the following state:
-// r0: argc
-// r1: constructor (built-in Array function)
-// lr: return address
-// sp[0]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in r1 needs to be preserved for
-// entering the generic code. In both cases argc in r0 needs to be preserved.
-// Both registers are preserved by this code, so there is no need to
-// differentiate between a construct call and a normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label* call_generic_code) {
- Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero or one argument.
- __ cmp(r0, Operand::Zero());
- __ b(ne, &argc_one_or_more);
-
- // Handle construction of an empty array.
- __ bind(&empty_array);
- AllocateEmptyJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
- // Set up return value, remove receiver from stack and return.
- __ mov(r0, r2);
- __ add(sp, sp, Operand(kPointerSize));
- __ Jump(lr);
-
- // Check for one argument. Bail out if the argument is not a smi or is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmp(r0, Operand(1));
- __ b(ne, &argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
- __ ldr(r2, MemOperand(sp)); // Get the argument from the stack.
- __ tst(r2, r2);
- __ b(ne, &not_empty_array);
- __ Drop(1); // Adjust stack.
- __ mov(r0, Operand::Zero()); // Treat this as a call with argc of zero.
- __ b(&empty_array);
-
- __ bind(&not_empty_array);
- __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
- __ b(ne, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
- __ b(ge, call_generic_code);
-
- // r0: argc
- // r1: constructor
- // r2: array_size (smi)
- // sp[0]: argument
- AllocateJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
- // Set up return value, remove receiver and argument from stack and return.
- __ mov(r0, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Jump(lr);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ mov(r2, Operand(r0, LSL, kSmiTagSize)); // Convert argc to a smi.
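- // (With kSmiTag == 0 and kSmiTagSize == 1, as asserted elsewhere in this
- // file, a smi encodes n as n << 1; e.g. argc == 3 becomes the smi value 6.)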
-
- // r0: argc
- // r1: constructor
- // r2: array_size (smi)
- // sp[0]: last argument
- AllocateJSArray(masm,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1, r2, r6);
-
- // Fill arguments as array elements. Copy from the top of the stack (last
- // element) to the array backing store, filling it backwards. Note:
- // elements_array_end points past the backing store, therefore PreIndex is
- // used when filling the backing store.
- // r0: argc
- // r3: JSArray
- // r4: elements_array storage start (untagged)
- // r5: elements_array_end (untagged)
- // sp[0]: last argument
- Label loop, entry;
- __ mov(r7, sp);
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(r2, &has_non_smi_element);
- }
- __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
- __ bind(&entry);
- __ cmp(r4, r5);
- __ b(lt, &loop);
-
- __ bind(&finish);
- __ mov(sp, r7);
-
- // Remove caller arguments and receiver from the stack, set up the return
- // value, and return.
- // r0: argc
- // r3: JSArray
- // sp[0]: receiver
- __ add(sp, sp, Operand(kPointerSize));
- __ mov(r0, r3);
- __ Jump(lr);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(
- r2, r9, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(r3, r4);
- __ b(call_generic_code);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- // r3: JSArray
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- r2,
- r9,
- &cant_transition_map);
- __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ RecordWriteField(r3,
- HeapObject::kMapOffset,
- r2,
- r9,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- Label loop2;
- __ sub(r7, r7, Operand(kPointerSize));
- __ bind(&loop2);
- __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
- __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
- __ cmp(r4, r5);
- __ b(lt, &loop2);
- __ b(&finish);
-}
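-
-// The dispatch implemented by ArrayNativeCode, in outline (a sketch only;
-// bailouts to call_generic_code are elided):
-//
-//   if (argc == 0)  return AllocateEmptyJSArray();
-//   if (argc == 1)  { len = arg0;
-//                     if (len == 0) return AllocateEmptyJSArray();
-//                     if (!is_smi(len) || len < 0 ||
-//                         len >= kInitialMaxFastElementArray) bail out;
-//                     return AllocateJSArray(len, fill_with_hole = true); }
-//   /* argc >= 2 */ a = AllocateJSArray(argc, fill_with_hole = false);
-//                   copy the arguments into the backing store, transitioning
-//                   FAST_SMI_ELEMENTS -> FAST_ELEMENTS on a non-smi element.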
-
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, r1);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin InternalArray functions should be maps.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for InternalArray function");
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for InternalArray function");
- }
-
- // Run the native code for the InternalArray function called as a normal
- // function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle the
- // construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
- // Get the Array function.
- GenerateLoadArrayFunction(masm, r1);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function");
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code if the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
-
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- r2 : type info cell
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
- // Initial map for the builtin Array function should be a map.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r3, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected initial map for Array function");
- __ CompareObjectType(r1, r3, r4, MAP_TYPE);
- __ Assert(eq, "Unexpected initial map for Array function");
-
- if (FLAG_optimize_constructed_arrays) {
- // We should either have undefined in r2 or a valid jsglobalpropertycell
- Label okay_here;
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- Handle<Map> global_property_cell_map(
- masm->isolate()->heap()->global_property_cell_map());
- __ cmp(r2, Operand(undefined_sentinel));
- __ b(eq, &okay_here);
- __ ldr(r3, FieldMemOperand(r2, 0));
- __ cmp(r3, Operand(global_property_cell_map));
- __ Assert(eq, "Expected property cell in register ebx");
- __ bind(&okay_here);
- }
- }
-
- if (FLAG_optimize_constructed_arrays) {
- Label not_zero_case, not_one_case;
- __ tst(r0, r0);
- __ b(ne, &not_zero_case);
- ArrayNoArgumentConstructorStub no_argument_stub;
- __ TailCallStub(&no_argument_stub);
-
- __ bind(&not_zero_case);
- __ cmp(r0, Operand(1));
- __ b(gt, &not_one_case);
- ArraySingleArgumentConstructorStub single_argument_stub;
- __ TailCallStub(&single_argument_stub);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub n_argument_stub;
- __ TailCallStub(&n_argument_stub);
- } else {
- Label generic_constructor;
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
- }
-}
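-
-// With FLAG_optimize_constructed_arrays, the construct path above is a
-// three-way dispatch on the argument count (sketch):
-//
-//   argc == 0  ->  ArrayNoArgumentConstructorStub
-//   argc == 1  ->  ArraySingleArgumentConstructorStub
-//   argc >= 2  ->  ArrayNArgumentsConstructorStub
-//
-// Otherwise it falls back to ArrayNativeCode and, failing that, to the
-// generic construct stub.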
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
- // -- sp[argc * 4] : receiver
- // -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1, r2, r3);
-
- Register function = r1;
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r2);
- __ cmp(function, Operand(r2));
- __ Assert(eq, "Unexpected String function");
- }
-
- // Load the first argument into r0 and get rid of the rest.
- Label no_arguments;
- __ cmp(r0, Operand::Zero());
- __ b(eq, &no_arguments);
- // First arg = sp[(argc - 1) * 4].
- __ sub(r0, r0, Operand(1));
- __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
- // sp now points to args[0]; drop args[0] and the receiver.
- __ Drop(2);
-
- Register argument = r2;
- Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- r0, // Input.
- argument, // Result.
- r3, // Scratch.
- r4, // Scratch.
- r5, // Scratch.
- false, // Is it a Smi?
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
- __ bind(&argument_is_string);
-
- // ----------- S t a t e -------------
- // -- r2 : argument converted to string
- // -- r1 : constructor function
- // -- lr : return address
- // -----------------------------------
-
- Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- r0, // Result.
- r3, // Scratch.
- r4, // Scratch.
- &gc_required,
- TAG_OBJECT);
-
- // Initialize the String object.
- Register map = r3;
- __ LoadGlobalFunctionInitialMap(function, map, r4);
- if (FLAG_debug_code) {
- __ ldrb(r4, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(eq, "Unexpected string wrapper instance size");
- __ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ cmp(r4, Operand::Zero());
- __ Assert(eq, "Unexpected unused properties of string wrapper");
- }
- __ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
-
- __ str(argument, FieldMemOperand(r0, JSValue::kValueOffset));
-
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-
- __ Ret();
-
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- __ JumpIfSmi(r0, &convert_argument);
-
- // Is it a String?
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ tst(r3, Operand(kIsNotStringMask));
- __ b(ne, &convert_argument);
- __ mov(argument, r0);
- __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
- __ b(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into r2.
- __ bind(&convert_argument);
- __ push(function); // Preserve the function.
- __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- }
- __ pop(function);
- __ mov(argument, r0);
- __ b(&argument_is_string);
-
- // Load the empty string into r2, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kempty_stringRootIndex);
- __ Drop(1);
- __ b(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- }
- __ Ret();
-}
-
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mov(pc, r2);
-}
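-
-// The target computed above is the instruction start of the shared code
-// object; in effect (sketch):
-//
-//   entry = code + Code::kHeaderSize - kHeapObjectTag;
-//
-// i.e. skip the Code object's header and strip the heap-object tag bit.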
-
-
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- __ push(r1); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(r5);
- // Restore receiver.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
- GenerateTailCallToSharedCode(masm);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
-
- Isolate* isolate = masm->isolate();
-
- // Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve the two incoming parameters on the stack.
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ push(r0); // Smi-tagged arguments count.
- __ push(r1); // Constructor function.
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ mov(r2, Operand(debug_step_in_fp));
- __ ldr(r2, MemOperand(r2));
- __ tst(r2, r2);
- __ b(ne, &rt_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &rt_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc), in which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // r1: constructor function
- // r2: initial map
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ b(eq, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- MemOperand constructor_count =
- FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
- __ ldrb(r4, constructor_count);
- __ sub(r4, r4, Operand(1), SetCC);
- __ strb(r4, constructor_count);
- __ b(ne, &allocate);
-
- __ Push(r1, r2);
-
- __ push(r1); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(r2);
- __ pop(r1);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // r1: constructor function
- // r2: initial map
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // r1: constructor function
- // r2: initial map
- // r3: object size
- // r4: JSObject (not tagged)
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Fill all the in-object properties with the appropriate filler.
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- if (count_constructions) {
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
- // r0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(r0, r6);
- __ Assert(le, "Unexpected number of pre-allocated property fields.");
- }
- __ InitializeFieldsWithFiller(r5, r0, r7);
- // Pre-fill with one-pointer filler maps to allow for later truncation.
- __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
- }
- __ InitializeFieldsWithFiller(r5, r6, r7);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- __ add(r4, r4, Operand(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed. Continue with the
- // allocated object if not; fall through to the runtime call if it is.
- // r1: constructor function
- // r4: JSObject
- // r5: start of next object (not tagged)
- __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
- // The instance sizes field contains both the pre-allocated property
- // fields and the in-object properties.
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ add(r3, r3, Operand(r6));
- __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
- kBitsPerByte);
- __ sub(r3, r3, Operand(r6), SetCC);
-
- // Done if no extra properties are to be allocated.
- __ b(eq, &allocated);
- __ Assert(pl, "Property allocation count failed.");
-
- // Scale the number of elements by the pointer size and add the FixedArray
- // header size to the start-of-next-object calculation from above.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: start of next object
- __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ AllocateInNewSpace(
- r0,
- r5,
- r6,
- r2,
- &undo_allocation,
- static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
- // Initialize the FixedArray.
- // r1: constructor
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
- __ mov(r2, r5);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
- __ mov(r0, Operand(r3, LSL, kSmiTagSize));
- __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-
- // Initialize the fields to undefined.
- // r1: constructor function
- // r2: First element of FixedArray (not tagged)
- // r3: number of elements in properties array
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
- { Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r7, r8);
- __ Assert(eq, "Undefined value not loaded.");
- }
- __ b(&entry);
- __ bind(&loop);
- __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
- __ bind(&entry);
- __ cmp(r2, r6);
- __ b(lt, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject.
- // r1: constructor function
- // r4: JSObject
- // r5: FixedArray (not tagged)
- __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag.
- __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
-
- // Continue with the successfully allocated JSObject.
- // r1: constructor function
- // r4: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated object's unused properties.
- // r4: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(r4, r5);
- }
-
- // Allocate the new receiver object using the runtime call.
- // r1: constructor function
- __ bind(&rt_call);
- __ push(r1); // argument for Runtime_NewObject
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(r4, r0);
-
- // Receiver for constructor call allocated.
- // r4: JSObject
- __ bind(&allocated);
- __ push(r4);
- __ push(r4);
-
- // Reload the number of arguments and the constructor from the stack.
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
-
- // Set up pointer to last argument.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Set up the number of arguments for the function call below.
- __ mov(r0, Operand(r3, LSR, kSmiTagSize));
-
- // Copy arguments and receiver to the expression stack.
- // r0: number of arguments
- // r1: constructor function
- // r2: address of last argument (caller sp)
- // r3: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: constructor function
- // sp[3]: number of arguments (smi-tagged)
- Label loop, entry;
- __ b(&entry);
- __ bind(&loop);
- __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
- __ push(ip);
- __ bind(&entry);
- __ sub(r3, r3, Operand(2), SetCC);
- __ b(ge, &loop);
-
- // Call the function.
- // r0: number of arguments
- // r1: constructor function
- if (is_api_function) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
-
- // Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context from the frame.
- // r0: result
- // sp[0]: receiver
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(r0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r1, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &exit);
-
- // Symbols are "objects".
- __ CompareInstanceType(r1, r3, SYMBOL_TYPE);
- __ b(eq, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: constructor function
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-
- // Leave construct frame.
- }
-
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
- __ add(sp, sp, Operand(kPointerSize));
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
- __ Jump(lr);
-}
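-
-// Layout of the receiver produced by the inline-new path above, as an
-// untagged, word-indexed sketch (consistent with the ASSERT_EQ offsets used):
-//
-//   word 0      : initial map
-//   word 1      : properties (empty fixed array, or the FixedArray above)
-//   word 2      : elements (empty fixed array)
-//   word 3..n-1 : in-object fields, pre-filled with undefined (and with
-//                 one-pointer filler maps past the pre-allocated fields when
-//                 construction counting is active)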
-
-
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
-}
-
-
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- // Called from Generate_JS_Entry
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- // r5-r7, cp may be clobbered
-
- // Clear the context before we push it when entering the internal frame.
- __ mov(cp, Operand::Zero());
-
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Set up the context from the function argument.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- __ InitializeRootRegister();
-
- // Push the function and the receiver onto the stack.
- __ push(r1);
- __ push(r2);
-
- // Copy arguments to the stack in a loop.
- // r1: function
- // r3: argc
- // r4: argv, i.e. points to first arg
- Label loop, entry;
- __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
- // r2 points past last arg.
- __ b(&entry);
- __ bind(&loop);
- __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
- __ ldr(r0, MemOperand(r0)); // dereference handle
- __ push(r0); // push parameter
- __ bind(&entry);
- __ cmp(r4, r2);
- __ b(ne, &loop);
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ mov(r5, Operand(r4));
- __ mov(r6, Operand(r4));
- __ mov(r7, Operand(r4));
- if (kR9Available == 1) {
- __ mov(r9, Operand(r4));
- }
-
- // Invoke the code and pass argc as r0.
- __ mov(r0, Operand(r3));
- if (is_construct) {
- // No type feedback cell is available.
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ mov(r2, Operand(undefined_sentinel));
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
- // Exit the JS frame, remove the parameters (except the function), and
- // return. Respect the ABI stack constraint.
- }
- __ Jump(lr);
-
- // r0: result
-}
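-
-// Net effect of the trampoline above (a sketch; argv holds handles, hence
-// the double load in the copy loop):
-//
-//   cp = function->context; push(function); push(receiver);
-//   for (int i = 0; i < argc; i++) push(*argv[i]);
-//   r0 = argc;
-//   is_construct ? CallConstructStub(...) : InvokeFunction(function, argc);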
-
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyCompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ Jump(r2);
-}
-
-
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve the function.
- __ push(r1);
- // Push call kind information.
- __ push(r5);
-
- // Push the function on the stack as the argument to the runtime function.
- __ push(r1);
- __ CallRuntime(Runtime::kLazyRecompile, 1);
- // Calculate the entry point.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Restore call kind information.
- __ pop(r5);
- // Restore saved function.
- __ pop(r1);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ Jump(r2);
-}
-
-
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- // The following registers must be saved and restored when calling through to
- // the runtime:
- // r0 - contains return address (beginning of patch sequence)
- // r1 - function object
- FrameScope scope(masm, StackFrame::MANUAL);
- __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(1, 0, r1);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ mov(pc, r0);
-}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across the notification; this is important for
- // compiled stubs that tail-call the runtime on deopts, passing their
- // parameters in registers.
- __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
- // Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
- __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
- }
-
- __ add(sp, sp, Operand(kPointerSize)); // Ignore the state slot.
- __ mov(pc, lr); // Jump to the miss handler.
-}
-
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the function and deoptimization type to the runtime system.
- __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- }
-
- // Get the full codegen state from the stack and untag it -> r6.
- __ ldr(r6, MemOperand(sp, 0 * kPointerSize));
- __ SmiUntag(r6);
- // Switch on the state.
- Label with_tos_register, unknown_state;
- __ cmp(r6, Operand(FullCodeGenerator::NO_REGISTERS));
- __ b(ne, &with_tos_register);
- __ add(sp, sp, Operand(1 * kPointerSize)); // Remove state.
- __ Ret();
-
- __ bind(&with_tos_register);
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ cmp(r6, Operand(FullCodeGenerator::TOS_REG));
- __ b(ne, &unknown_state);
- __ add(sp, sp, Operand(2 * kPointerSize)); // Remove state.
- __ Ret();
-
- __ bind(&unknown_state);
- __ stop("no cases left");
-}
-
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- __ Ret();
-}
-
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- CpuFeatures::TryForceFeatureScope scope(VFP3);
- if (!CPU::SupportsCrankshaft()) {
- __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
- return;
- }
-
- // Lookup the function in the JavaScript frame and push it as an
- // argument to the on-stack replacement function.
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- }
-
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- Label skip;
- __ cmp(r0, Operand(Smi::FromInt(-1)));
- __ b(ne, &skip);
- __ Ret();
-
- __ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(r0);
- __ push(r0);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- // 1. Make sure we have at least one argument.
- // r0: actual number of arguments
- { Label done;
- __ cmp(r0, Operand::Zero());
- __ b(ne, &done);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ push(r2);
- __ add(r0, r0, Operand(1));
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack and
- // check whether it is a function.
- // r0: actual number of arguments
- Label slow, non_function;
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ JumpIfSmi(r1, &non_function);
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // 3a. Patch the first argument if necessary when calling a function.
- // r0: actual number of arguments
- // r1: function
- Label shift_arguments;
- __ mov(r4, Operand::Zero()); // indicate regular JS_FUNCTION
- { Label convert_to_object, use_global_receiver, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, &shift_arguments);
-
- // Do not transform the receiver for natives (compiler hints already in r3).
- __ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &shift_arguments);
-
- // Compute the receiver in non-strict mode.
- __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ ldr(r2, MemOperand(r2, -kPointerSize));
- // r0: actual number of arguments
- // r1: function
- // r2: first argument
- __ JumpIfSmi(r2, &convert_to_object);
-
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ cmp(r2, r3);
- __ b(eq, &use_global_receiver);
- __ LoadRoot(r3, Heap::kNullValueRootIndex);
- __ cmp(r2, r3);
- __ b(eq, &use_global_receiver);
-
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &shift_arguments);
-
- __ bind(&convert_to_object);
-
- {
- // Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
- __ push(r0);
-
- __ push(r2);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(r2, r0);
-
- __ pop(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-
- // Exit the internal frame.
- }
-
- // Restore the function to r1, and the flag to r4.
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ mov(r4, Operand::Zero());
- __ jmp(&patch_receiver);
-
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
-
- __ bind(&patch_receiver);
- __ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ str(r2, MemOperand(r3, -kPointerSize));
-
- __ jmp(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ mov(r4, Operand(1, RelocInfo::NONE32)); // indicate function proxy
- __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(eq, &shift_arguments);
- __ bind(&non_function);
- __ mov(r4, Operand(2, RelocInfo::NONE32)); // indicate non-function
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- // r0: actual number of arguments
- // r1: function
- // r4: call type (0: JS function, 1: function proxy, 2: non-function)
- __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ str(r1, MemOperand(r2, -kPointerSize));
-
- // 4. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- // r0: actual number of arguments
- // r1: function
- // r4: call type (0: JS function, 1: function proxy, 2: non-function)
- __ bind(&shift_arguments);
- { Label loop;
- // Calculate the copy start address (destination). Copy end address is sp.
- __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
-
- __ bind(&loop);
- __ ldr(ip, MemOperand(r2, -kPointerSize));
- __ str(ip, MemOperand(r2));
- __ sub(r2, r2, Operand(kPointerSize));
- __ cmp(r2, sp);
- __ b(ne, &loop);
- // Adjust the actual number of arguments and remove the top element
- // (which is a copy of the last argument).
- __ sub(r0, r0, Operand(1));
- __ pop();
- }
-
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- // r0: actual number of arguments
- // r1: function
- // r4: call type (0: JS function, 1: function proxy, 2: non-function)
- { Label function, non_proxy;
- __ tst(r4, r4);
- __ b(eq, &function);
- // Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ mov(r2, Operand::Zero());
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ cmp(r4, Operand(1));
- __ b(ne, &non_proxy);
-
- __ push(r1); // re-add proxy object as additional argument
- __ add(r0, r0, Operand(1));
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register r3 without checking arguments.
- // r0: actual number of arguments
- // r1: function
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2,
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ cmp(r2, r0); // Check formal and actual parameter counts.
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET,
- ne);
-
- ParameterCount expected(0);
- __ InvokeCode(r3, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
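-
-// The receiver patching in step 3a mirrors the call semantics (sketch):
-//
-//   if (strict mode function || native)      leave the receiver as-is;
-//   else if (receiver == null || undefined)  use the global receiver;
-//   else if (!IsSpecObject(receiver))        receiver = ToObject(receiver);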
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset = -5 * kPointerSize;
- const int kLimitOffset = -4 * kPointerSize;
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
-
- __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r0);
- __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
- // Make r2 the space we have left. The stack might already be overflowed
- // here, which will cause r2 to become negative.
- __ sub(r2, sp, r2);
- // Check if the arguments will overflow the stack.
- __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ b(gt, &okay); // Signed comparison.
-
- // Out of stack space.
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ push(r1);
- __ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
-
- // Push current limit and index.
- __ bind(&okay);
- __ push(r0); // limit
- __ mov(r1, Operand::Zero()); // initial index
- __ push(r1);
-
- // Get the receiver.
- __ ldr(r0, MemOperand(fp, kRecvOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in r1.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Do not transform the receiver for native functions.
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(r0, &call_to_object);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_receiver);
-
- // Check if the receiver is already a JavaScript object.
- // r0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &push_receiver);
-
- // Convert the receiver to a regular object.
- // r0: receiver
- __ bind(&call_to_object);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ b(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
- __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- // r0: receiver
- __ bind(&push_receiver);
- __ push(r0);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ b(&entry);
-
- // Load the current argument from the arguments array and push it to the
- // stack.
- // r0: current argument index
- __ bind(&loop);
- __ ldr(r1, MemOperand(fp, kArgsOffset));
- __ push(r1);
- __ push(r0);
-
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(r0);
-
- // Advance the index; it is kept as a smi in the frame slot.
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ add(r0, r0, Operand(1 << kSmiTagSize));
- __ str(r0, MemOperand(fp, kIndexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ldr(r1, MemOperand(fp, kLimitOffset));
- __ cmp(r0, r1);
- __ b(ne, &loop);
-
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(r0);
- __ mov(r0, Operand(r0, ASR, kSmiTagSize));
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &call_proxy);
- __ InvokeFunction(r1, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- frame_scope.GenerateLeaveFrame();
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Jump(lr);
-
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(r1); // add function proxy as last argument
- __ add(r0, r0, Operand(1));
- __ mov(r2, Operand::Zero());
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- // Tear down the internal frame and remove function, receiver and args.
- }
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Jump(lr);
-}
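-
-// The copy loop above pushes the applied arguments one at a time (sketch;
-// the index and limit live in the frame slots defined at the top):
-//
-//   for (index = smi(0); index != limit; index += smi(1))
-//     push(%GetProperty(arguments, index));
-//   then invoke the function, or CALL_FUNCTION_PROXY for proxies.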
-
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
- __ add(fp, sp, Operand(3 * kPointerSize));
-}
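-
-// Adaptor frame built above, relative to the new fp (a sketch consistent
-// with the stm ordering and the fp-relative loads in this file):
-//
-//   fp + 1 word  : saved lr
-//   fp           : caller's fp
-//   fp - 1 word  : ARGUMENTS_ADAPTOR frame marker (smi)
-//   fp - 2 words : function (r1)
-//   fp - 3 words : actual argument count (smi-tagged r0)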
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : result being passed through
- // -----------------------------------
- // Get the number of arguments passed (as a smi), tear down the frame and
- // then drop the parameters from the stack.
- __ ldr(r1, MemOperand(fp, -3 * kPointerSize));
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : actual number of arguments
- // -- r1 : function (passed through to callee)
- // -- r2 : expected number of arguments
- // -- r3 : code entry to call
- // -- r5 : call kind information
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments;
-
- Label enough, too_few;
- __ cmp(r0, r2);
- __ b(lt, &too_few);
- __ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ b(eq, &dont_adapt_arguments);
-
- { // Enough parameters: actual >= expected
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
-
- // Calculate copy start address into r0 and copy end address into r2.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: code entry to call
- __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- // adjust for return address and receiver
- __ add(r0, r0, Operand(2 * kPointerSize));
- __ sub(r2, r0, Operand(r2, LSL, kPointerSizeLog2));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: copy end address
- // r3: code entry to call
-
- Label copy;
- __ bind(&copy);
- __ ldr(ip, MemOperand(r0, 0));
- __ push(ip);
- __ cmp(r0, r2); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kPointerSize));
- __ b(ne, &copy);
-
- __ b(&invoke);
- }
-
- { // Too few parameters: Actual < expected
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
-
- // Calculate the copy start address into r0; the copy end address is fp.
- // r0: actual number of arguments as a smi
- // r1: function
- // r2: expected number of arguments
- // r3: code entry to call
- __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Copy the arguments (including the receiver) to the new stack frame.
- // r0: copy start address
- // r1: function
- // r2: expected number of arguments
- // r3: code entry to call
- Label copy;
- __ bind(&copy);
- // Adjust load for return address and receiver.
- __ ldr(ip, MemOperand(r0, 2 * kPointerSize));
- __ push(ip);
- __ cmp(r0, fp); // Compare before moving to next argument.
- __ sub(r0, r0, Operand(kPointerSize));
- __ b(ne, &copy);
-
- // Fill the remaining expected arguments with undefined.
- // r1: function
- // r2: expected number of arguments
- // r3: code entry to call
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
- __ sub(r2, r2, Operand(4 * kPointerSize)); // Adjust for frame.
-
- Label fill;
- __ bind(&fill);
- __ push(ip);
- __ cmp(sp, r2);
- __ b(ne, &fill);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- __ Call(r3);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Exit frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ Jump(lr);
-
-
- // -------------------------------------------
- // Don't adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ Jump(r3);
-}
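-
-// The adaptation above, in outline (sketch):
-//
-//   if (expected == kDontAdaptArgumentsSentinel) tail-call the code entry;
-//   enter the adaptor frame;
-//   if (actual >= expected) copy 'expected' arguments (plus receiver) down;
-//   else copy 'actual' arguments (plus receiver) and pad to 'expected' with
-//        undefined;
-//   call the code entry; leave the adaptor frame; return.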
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/code-stubs-arm.cc b/src/3rdparty/v8/src/arm/code-stubs-arm.cc
deleted file mode 100644
index e7a8489..0000000
--- a/src/3rdparty/v8/src/arm/code-stubs-arm.cc
+++ /dev/null
@@ -1,8166 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r3, r2, r1, r0 };
- descriptor->register_param_count_ = 4;
- descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r1, r0 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r0, r1 };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- Address entry =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
-}
-
-
-static void InitializeArrayConstructorDescriptor(Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // Register state:
- // r1 -- constructor function
- // r2 -- type info cell with elements kind
- // r0 -- number of arguments to the constructor function
- static Register registers[] = { r1, r2 };
- descriptor->register_param_count_ = 2;
- // The stack parameter count needs to account for the constructor pointer
- // and a single argument.
- descriptor->stack_parameter_count_ = &r0;
- descriptor->register_params_ = registers;
- descriptor->extra_expression_stack_count_ = 1;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ArrayConstructor_StubFailure);
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cond);
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* lhs_not_nan,
- Label* slow,
- bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs);
-
-
-// Check if the operand is a heap number.
-static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
- Register scratch1, Register scratch2,
- Label* not_a_heap_number) {
- __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
- __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch1, scratch2);
- __ b(ne, not_a_heap_number);
-}
-
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in r0.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(r0, &check_heap_number);
- __ Ret();
-
- __ bind(&check_heap_number);
- EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
- __ Ret();
-
- __ bind(&call_builtin);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in cp.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
-
- // Pop the function info from the stack.
- __ pop(r3);
-
- // Attempt to allocate new JSFunction in new space.
- __ AllocateInNewSpace(JSFunction::kSize,
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
-
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
- __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
- __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
- __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ ldr(r1,
- FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ tst(r1, r1);
- __ b(ne, &check_optimized);
- }
- __ bind(&install_unoptimized);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
-
- // r2 holds native context, r1 points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first entry.
- Label install_optimized;
- // Speculatively move code object into r4.
- __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
- __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
- __ cmp(r2, r5);
- __ b(eq, &install_optimized);
-
- // Iterate through the rest of the map backwards. r4 holds an index as a Smi.
- Label loop;
- __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double-check the first entry.
-
- __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ b(eq, &install_unoptimized);
- __ sub(r4, r4, Operand(
- Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry.
- __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r5, MemOperand(r5));
- __ cmp(r2, r5);
- __ b(ne, &loop);
- // Hit: fetch the optimized code.
- __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(r5, r5, Operand(kPointerSize));
- __ ldr(r4, MemOperand(r5));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(),
- 1, r6, r7);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-
- // Now link a function into a list of optimized functions.
- __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
- // No need for a write barrier as the JSFunction (r0) is in new space.
-
- __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Store the JSFunction (r0) into r4 before issuing the write barrier, as
- // it clobbers all the registers passed.
- __ mov(r4, r0);
- __ RecordWriteContextSlot(
- r2,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- r4,
- r1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
-
- // Return result. The argument function info has been popped already.
- __ Ret();
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ LoadRoot(r4, Heap::kFalseValueRootIndex);
- __ Push(cp, r3, r4);
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-
- // Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Set up the object header.
- __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(r1, Operand(Smi::FromInt(0)));
- __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-
- // Copy the QML global object from the surrounding context.
- __ ldr(r1,
- MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
- __ str(r1,
- MemOperand(r0, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ pop();
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: function.
- // [sp + kPointerSize]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0, r1, r2, &gc, TAG_OBJECT);
-
- // Load the function from the stack.
- __ ldr(r3, MemOperand(sp, 0));
-
- // Load the serialized scope info from the stack.
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(length)));
- __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(r3, &after_sentinel);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ cmp(r3, Operand::Zero());
- __ Assert(eq, message);
- }
- __ ldr(r3, GlobalObjectOperand());
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots, copy the global object from the previous context.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
- __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
- __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
- __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));
-
- // Copy the QML global object from the surrounding context.
- __ ldr(r1, ContextOperand(cp, Context::QML_GLOBAL_OBJECT_INDEX));
- __ str(r1, ContextOperand(r0, Context::QML_GLOBAL_OBJECT_INDEX));
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
- }
-
- // Remove the on-stack argument and return.
- __ mov(cp, r0);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- AllocationSiteMode allocation_site_mode,
- Label* fail) {
- // Registers on entry:
- //
- // r3: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
-
- int size = JSArray::kSize;
- int allocation_info_start = size;
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- size += AllocationSiteInfo::kSize;
- }
- size += elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- AllocationFlags flags = TAG_OBJECT;
- if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
- flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
- }
- __ AllocateInNewSpace(size, r0, r1, r2, fail, flags);
-
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ mov(r2, Operand(Handle<Map>(masm->isolate()->heap()->
- allocation_site_info_map())));
- __ str(r2, FieldMemOperand(r0, allocation_info_start));
- __ str(r3, FieldMemOperand(r0, allocation_info_start + kPointerSize));
- }
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and set up the
- // elements pointer in the resulting object.
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ add(r2, r0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
- } else {
- __ add(r2, r0, Operand(JSArray::kSize));
- }
- __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- ASSERT((elements_size % kPointerSize) == 0);
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
- }
-}
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case);
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
- __ b(ne, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&check_fast_elements);
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(r3);
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ CompareRoot(r3, expected_map_index);
- __ Assert(eq, message);
- __ pop(r3);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode,
- allocation_site_mode_,
- &slow_case);
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
-// scratch register. Destroys the source register. No GC occurs during this
-// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public PlatformCodeStub {
- public:
- ConvertToDoubleStub(Register result_reg_1,
- Register result_reg_2,
- Register source_reg,
- Register scratch_reg)
- : result1_(result_reg_1),
- result2_(result_reg_2),
- source_(source_reg),
- zeros_(scratch_reg) { }
-
- private:
- Register result1_;
- Register result2_;
- Register source_;
- Register zeros_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() { return ConvertToDouble; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return result1_.code() +
- (result2_.code() << 4) +
- (source_.code() << 8) +
- (zeros_.code() << 12);
- }
-
- void Generate(MacroAssembler* masm);
-};
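-
-
-// MinorKey() above packs the four 4-bit register codes into 16 bits;
-// decoding is the reverse shift-and-mask. Illustrative helper only:
-inline int RegisterCodeFromMinorKeySketch(int minor_key, int slot) {
-  // Slot 0..3 selects result1, result2, source and zeros respectively.
-  return (minor_key >> (4 * slot)) & 0xf;
-}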
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
- Register exponent = result1_;
- Register mantissa = result2_;
-
- Label not_special;
- // Convert from Smi to integer.
- __ mov(source_, Operand(source_, ASR, kSmiTagSize));
- // Move sign bit from source to destination. This works because the sign bit
- // in the exponent word of the double has the same position and polarity as
- // the 2's complement sign bit in a Smi.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
- // Subtract from 0 if source was negative.
- __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register source_ contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(source_, Operand(1));
- __ b(gt, &not_special);
-
- // For 1 or -1 we need to OR in the zero exponent (biased to 1023).
- const uint32_t exponent_word_for_1 =
- HeapNumber::kExponentBias << HeapNumber::kExponentShift;
- __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
- // 1, 0 and -1 all have 0 for the second word.
- __ mov(mantissa, Operand::Zero());
- __ Ret();
-
- __ bind(&not_special);
- // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
- // Gets the wrong answer for 0, but we already checked for that case above.
- __ CountLeadingZeros(zeros_, source_, mantissa);
- // Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here. Use a fudge factor to
- // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
- // that fit in the ARM's constant field.
- int fudge = 0x400;
- __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
- __ add(mantissa, mantissa, Operand(fudge));
- __ orr(exponent,
- exponent,
- Operand(mantissa, LSL, HeapNumber::kExponentShift));
- // Shift up the source chopping the top bit off.
- __ add(zeros_, zeros_, Operand(1));
- // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
- __ mov(source_, Operand(source_, LSL, zeros_));
- // Compute lower part of fraction (last 12 bits).
- __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
- // And the top (top 20 bits).
- __ orr(exponent,
- exponent,
- Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
- __ Ret();
-}
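-
-
-// The stub above assembles the double by hand. The same Smi-to-double
-// mapping can be sketched in plain C++ by letting the compiler convert and
-// then splitting the IEEE 754 bits into the two words the stub produces.
-// Illustrative helper, not V8 API:
-inline void SmiValueToDoubleWordsSketch(int32_t value,
-                                        uint32_t* exponent_word,
-                                        uint32_t* mantissa_word) {
-  union { double d; uint64_t bits; } u;  // Widely supported type punning.
-  u.d = static_cast<double>(value);  // Exact: every int32 fits in a double.
-  *exponent_word = static_cast<uint32_t>(u.bits >> 32);  // Sign, exponent
-                                                         // and mantissa[51:32].
-  *mantissa_word = static_cast<uint32_t>(u.bits);        // Mantissa[31:0].
-}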
-
-
-void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
- __ vmov(d7.high(), scratch1);
- __ vcvt_f64_s32(d7, d7.high());
- __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
- __ vmov(d6.high(), scratch1);
- __ vcvt_f64_s32(d6, d6.high());
- if (destination == kCoreRegisters) {
- __ vmov(r2, r3, d7);
- __ vmov(r0, r1, d6);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(scratch1, Operand(r0));
- ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
- __ push(lr);
- __ Call(stub1.GetCode(masm->isolate()));
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(scratch1, Operand(r1));
- ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
- __ Call(stub2.GetCode(masm->isolate()));
- __ pop(lr);
- }
-}
-
-
-void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
- Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
-
- Label is_smi, done;
-
- // Smi-check
- __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
- // Heap number check
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
-
- // Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(VFP2) &&
- destination == kVFPRegisters) {
- CpuFeatures::Scope scope(VFP2);
- // Load the double from tagged HeapNumber to double register.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(dst, scratch1, HeapNumber::kValueOffset);
- } else {
- ASSERT(destination == kCoreRegisters);
- // Load the double from heap number to dst1 and dst2 in double format.
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
- }
- __ jmp(&done);
-
- // Handle loading a double from a smi.
- __ bind(&is_smi);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Convert smi to double using VFP instructions.
- __ vmov(dst.high(), scratch1);
- __ vcvt_f64_s32(dst, dst.high());
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ vmov(dst1, dst2, dst);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write smi to dst1 and dst2 double format.
- __ mov(scratch1, Operand(object));
- ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
- __ push(lr);
- __ Call(stub.GetCode(masm->isolate()));
- __ pop(lr);
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_number) {
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- Label done;
- Label not_in_int32_range;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
- __ cmp(scratch1, heap_number_map);
- __ b(ne, not_number);
- __ ConvertToInt32(object,
- dst,
- scratch1,
- scratch2,
- double_scratch,
- &not_in_int32_range);
- __ jmp(&done);
-
- __ bind(&not_in_int32_range);
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- __ EmitOutOfInt32RangeTruncate(dst,
- scratch1,
- scratch2,
- scratch3);
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst_mantissa,
- Register dst_exponent,
- Register scratch2,
- SwVfpRegister single_scratch) {
- ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst_mantissa));
- ASSERT(!int_scratch.is(dst_exponent));
-
- Label done;
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(single_scratch, int_scratch);
- __ vcvt_f64_s32(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ vmov(dst_mantissa, dst_exponent, double_dst);
- }
- } else {
- Label fewer_than_20_useful_bits;
- // Expected output:
- // | dst_exponent | dst_mantissa |
- // | s | exp | mantissa |
-
- // Check for zero.
- __ cmp(int_scratch, Operand::Zero());
- __ mov(dst_exponent, int_scratch);
- __ mov(dst_mantissa, int_scratch);
- __ b(eq, &done);
-
- // Preload the sign of the value.
- __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
- // Get the absolute value of the object (as an unsigned integer).
- __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
-
- // Get mantissa[51:20].
-
- // Get the position of the first set bit.
- __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2);
- __ rsb(dst_mantissa, dst_mantissa, Operand(31));
-
- // Set the exponent.
- __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst_exponent, scratch2, scratch2,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
- // Clear the first non null bit.
- __ mov(scratch2, Operand(1));
- __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa));
-
- __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
- // Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord),
- SetCC);
- __ b(mi, &fewer_than_20_useful_bits);
- // Set the higher 20 bits of the mantissa.
- __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2));
- __ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2));
- __ b(&done);
-
- __ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
- __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
- __ orr(dst_exponent, dst_exponent, scratch2);
- // Set dst1 to 0.
- __ mov(dst_mantissa, Operand::Zero());
- }
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DwVfpRegister double_dst,
- DwVfpRegister double_scratch,
- Register dst_mantissa,
- Register dst_exponent,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- SwVfpRegister single_scratch,
- Label* not_int32) {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!heap_number_map.is(object) &&
- !heap_number_map.is(scratch1) &&
- !heap_number_map.is(scratch2));
-
- Label done, obj_is_not_smi;
-
- __ JumpIfNotSmi(object, &obj_is_not_smi);
- __ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
- dst_exponent, scratch2, single_scratch);
- __ b(&done);
-
- __ bind(&obj_is_not_smi);
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
-
- // Load the number.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Load the double value.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
-
- __ EmitVFPTruncate(kRoundToZero,
- scratch1,
- double_dst,
- scratch2,
- double_scratch,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ b(ne, not_int32);
-
- if (destination == kCoreRegisters) {
- __ vmov(dst_mantissa, dst_exponent, double_dst);
- }
-
- } else {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers.
- bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
- if (save_registers) {
- // Save both output registers, because the other one probably holds
- // an important value too.
- __ Push(dst_exponent, dst_mantissa);
- }
- __ Ldrd(dst_mantissa, dst_exponent,
- FieldMemOperand(object, HeapNumber::kValueOffset));
-
- // Check for 0 and -0.
- Label zero;
- __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask));
- __ orr(scratch1, scratch1, Operand(dst_mantissa));
- __ cmp(scratch1, Operand::Zero());
- __ b(eq, &zero);
-
- // Check that the value can be exactly represented by a 32-bit integer.
- // Jump to not_int32 if that's not the case.
- Label restore_input_and_miss;
- DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
- &restore_input_and_miss);
-
- // dst_* were trashed. Reload the double value.
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ Ldrd(dst_mantissa, dst_exponent,
- FieldMemOperand(object, HeapNumber::kValueOffset));
- __ b(&done);
-
- __ bind(&restore_input_and_miss);
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ b(not_int32);
-
- __ bind(&zero);
- if (save_registers) {
- __ Drop(2);
- }
- }
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
- Label* not_int32) {
- ASSERT(!dst.is(object));
- ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
- ASSERT(!scratch1.is(scratch2) &&
- !scratch1.is(scratch3) &&
- !scratch2.is(scratch3));
-
- Label done, maybe_undefined;
-
- __ UntagAndJumpIfSmi(dst, object, &done);
-
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
-
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
-
- // Object is a heap number.
- // Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
-
- // Load the double value.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);
-
- __ EmitVFPTruncate(kRoundToZero,
- dst,
- double_scratch0,
- scratch1,
- double_scratch1,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ b(ne, not_int32);
- } else {
- // Load the double value in the destination registers.
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
- __ orr(dst, scratch2, Operand(dst));
- __ cmp(dst, Operand::Zero());
- __ b(eq, &done);
-
- DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
-
- // Registers state after DoubleIs32BitInteger.
- // dst: mantissa[51:20].
- // scratch2: 1
-
- // Shift back the higher bits of the mantissa.
- __ mov(dst, Operand(dst, LSR, scratch3));
- // Set the implicit first bit.
- __ rsb(scratch3, scratch3, Operand(32));
- __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
- // Set the sign.
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
- }
- __ b(&done);
-
- __ bind(&maybe_undefined);
- __ CompareRoot(object, Heap::kUndefinedValueRootIndex);
- __ b(ne, not_int32);
- // |undefined| is truncated to 0.
- __ mov(dst, Operand(Smi::FromInt(0)));
- // Fall through.
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src_exponent,
- Register src_mantissa,
- Register dst,
- Register scratch,
- Label* not_int32) {
- // Get exponent alone in scratch.
- __ Ubfx(scratch,
- src_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Subtract the bias from the exponent.
- __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
-
- // src_exponent: higher (exponent) part of the double value.
- // src_mantissa: lower (mantissa) part of the double value.
- // scratch: unbiased exponent.
-
- // Fast cases. Check for obvious non 32-bit integer values.
- // Negative exponent cannot yield 32-bit integers.
- __ b(mi, not_int32);
- // Exponent greater than 31 cannot yield 32-bit integers.
- // Also, a positive value with an exponent equal to 31 is outside of the
- // signed 32-bit integer range.
- // Another way to put it is that if (exponent - signbit) > 30 then the
- // number cannot be represented as an int32.
- Register tmp = dst;
- __ sub(tmp, scratch, Operand(src_exponent, LSR, 31));
- __ cmp(tmp, Operand(30));
- __ b(gt, not_int32);
- // A 32-bit integer cannot have any of mantissa bits [21:0] set, since at
- // least the low 22 mantissa bits remain fractional when the exponent is at
- // most 30.
- __ tst(src_mantissa, Operand(0x3fffff));
- __ b(ne, not_int32);
-
- // Otherwise the exponent needs to be big enough to shift all the
- // non-zero bits out to the left. So we need the (30 - exponent) last bits
- // of the 31 higher bits of the mantissa to be null.
- // Because bits [21:0] are null, we can check instead that the
- // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
-
- // Get the 32 higher bits of the mantissa in dst.
- __ Ubfx(dst,
- src_mantissa,
- HeapNumber::kMantissaBitsInTopWord,
- 32 - HeapNumber::kMantissaBitsInTopWord);
- __ orr(dst,
- dst,
- Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord));
-
- // Create the mask and test the lower bits (of the higher bits).
- __ rsb(scratch, scratch, Operand(32));
- __ mov(src_mantissa, Operand(1));
- __ mov(src_exponent, Operand(src_mantissa, LSL, scratch));
- __ sub(src_exponent, src_exponent, Operand(1));
- __ tst(dst, src_exponent);
- __ b(ne, not_int32);
-}
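-
-
-// The same predicate in plain C++: a double is a 32-bit integer iff it is in
-// range, is not NaN and has no fractional part. Unlike the code above, this
-// sketch treats -0 as an integer; the callers handle +/-0 separately.
-// Illustrative helper, not V8 API:
-inline bool DoubleIsInt32Sketch(double value) {
-  if (value != value) return false;  // NaN never compares equal to itself.
-  if (value < -2147483648.0 || value > 2147483647.0) return false;
-  int32_t as_int = static_cast<int32_t>(value);  // Defined: value in range.
-  return static_cast<double>(as_int) == value;   // False if fraction lost.
-}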
-
-
-void FloatingPointHelper::CallCCodeForDoubleOperation(
- MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Using core registers:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
-
- // Assert that heap_number_result is callee-saved.
- // We currently always use r5 to pass it.
- ASSERT(heap_number_result.is(r5));
-
- // Push the current return address before the C call. Return will be
- // through pop(pc) below.
- __ push(lr);
- __ PrepareCallCFunction(0, 2, scratch);
- if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(d0, r0, r1);
- __ vmov(d1, r2, r3);
- }
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number. Double returned in
- // registers r0 and r1 or in d0.
- if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP2);
- __ vstr(d0,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- } else {
- __ Strd(r0, r1, FieldMemOperand(heap_number_result,
- HeapNumber::kValueOffset));
- }
- // Place heap_number_result in r0 and return to the pushed return address.
- __ mov(r0, Operand(heap_number_result));
- __ pop(pc);
-}
-
-
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
- return true;
- }
- if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
- WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
- Label max_negative_int;
- // the_int_ has the answer which is a signed int32 but not a Smi.
- // We test for the special value that has a different exponent. This test
- // has the neat side effect of setting the flags according to the sign.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ cmp(the_int_, Operand(0x80000000u));
- __ b(eq, &max_negative_int);
- // Set up the correct exponent in scratch_. All non-Smi int32s have the same
- // exponent: a non-Smi integer is 1.xxx * 2^30, so the exponent is 30 (biased).
- uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ mov(scratch_, Operand(non_smi_exponent));
- // Set the sign bit in scratch_ if the value was negative.
- __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
- // Subtract from 0 if the value was negative.
- __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
- // We should be masking the implicit first digit of the mantissa away here,
- // but it just ends up combining harmlessly with the last digit of the
- // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
- // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
- ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kExponentOffset));
- __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
- __ str(scratch_, FieldMemOperand(the_heap_number_,
- HeapNumber::kMantissaOffset));
- __ Ret();
-
- __ bind(&max_negative_int);
- // The max negative int32 is stored as a positive number in the mantissa of
- // a double because it uses a sign bit instead of using two's complement.
- // The actual mantissa bits stored are all 0 because the implicit most
- // significant 1 bit is not stored.
- non_smi_exponent += 1 << HeapNumber::kExponentShift;
- __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(ip, Operand::Zero());
- __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
- __ Ret();
-}
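-
-
-// A plain-C++ restatement of the exponent choice above: a non-Smi int32 has
-// magnitude in [2^30, 2^31], so its double representation has unbiased
-// exponent 30, except for the most negative int32 whose magnitude is exactly
-// 2^31. Illustrative helper, not V8 API:
-inline uint32_t NonSmiInt32BiasedExponentSketch(int32_t value) {
-  const uint32_t kBias = 1023;  // IEEE 754 double exponent bias.
-  return (value == -2147483647 - 1) ? kBias + 31 : kBias + 30;  // 0x41e/0x41d.
-}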
-
-
-// Handle the case where the lhs and rhs are the same object.
-// Equality is almost reflexive (everything but NaN), so this is a test
-// for "identity and not NaN".
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
- Label* slow,
- Condition cond) {
- Label not_identical;
- Label heap_number, return_equal;
- __ cmp(r0, r1);
- __ b(ne, &not_identical);
-
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second-best thing: test it ourselves.
- // The operands are identical and they are not both Smis, so neither of
- // them is a Smi. If the operand is not a heap number, return equal.
- if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cond == le || cond == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
- }
- }
- }
-
- __ bind(&return_equal);
- if (cond == lt) {
- __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
- } else if (cond == gt) {
- __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
- } else {
- __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
- }
- __ Ret();
-
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cond != lt && cond != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cond != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cond == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
- }
- }
- __ Ret();
- }
- // No fall through here.
-
- __ bind(&not_identical);
-}
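-
-
-// The NaN test above in portable form: a double is NaN iff all eleven
-// exponent bits are set and the 52 mantissa bits are not all zero.
-// Illustrative helper, not V8 API:
-inline bool DoubleWordsAreNaNSketch(uint32_t exponent_word,
-                                    uint32_t mantissa_word) {
-  const uint32_t kExponentMask = 0x7ff00000u;  // Bits 30..20 of the top word.
-  bool exponent_all_ones = (exponent_word & kExponentMask) == kExponentMask;
-  bool mantissa_non_zero =
-      ((exponent_word & 0x000fffffu) | mantissa_word) != 0;
-  return exponent_all_ones && mantissa_non_zero;
-}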
-
-
-// See comment at call site.
-static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* lhs_not_nan,
- Label* slow,
- bool strict) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- Label rhs_is_smi;
- __ JumpIfSmi(rhs, &rhs_is_smi);
-
- // Lhs is a Smi. Check whether the rhs is a heap number.
- __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
- if (strict) {
- // If rhs is not a number and lhs is a Smi then strict equality cannot
- // succeed. Return non-equal.
- // If rhs is r0 then there is already a non-zero value in it.
- if (!rhs.is(r0)) {
- __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
- }
- __ Ret(ne);
- } else {
- // Smi compared non-strictly with a non-Smi non-heap-number. Call
- // the runtime.
- __ b(ne, slow);
- }
-
- // Lhs is a smi, rhs is a number.
- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert lhs to a double in d7.
- CpuFeatures::Scope scope(VFP2);
- __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
- // Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- __ push(lr);
- // Convert lhs to a double in r2, r3.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(masm->isolate()));
- // Load rhs to a double in r0, r1.
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ pop(lr);
- }
-
- // We now have both loaded as doubles but we can skip the lhs nan check
- // since it's a smi.
- __ jmp(lhs_not_nan);
-
- __ bind(&rhs_is_smi);
- // Rhs is a smi. Check whether the non-smi lhs is a heap number.
- __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
- if (strict) {
- // If lhs is not a number and rhs is a smi then strict equality cannot
- // succeed. Return non-equal.
- // If lhs is r0 then there is already a non zero value in it.
- if (!lhs.is(r0)) {
- __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
- }
- __ Ret(ne);
- } else {
- // Smi compared non-strictly with a non-smi non-heap-number. Call
- // the runtime.
- __ b(ne, slow);
- }
-
- // Rhs is a smi, lhs is a heap number.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- // Convert rhs to a double in d6.
- __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
- } else {
- __ push(lr);
- // Load lhs to a double in r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- // Convert rhs to a double in r0, r1.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(masm->isolate()));
- __ pop(lr);
- }
- // Fall through to both_loaded_as_doubles.
-}
-
-
-void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
- Label one_is_nan, neither_is_nan;
-
- __ Sbfx(r4,
- lhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, lhs_not_nan);
- __ mov(r4,
- Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(lhs_mantissa, Operand::Zero());
- __ b(ne, &one_is_nan);
-
- __ bind(lhs_not_nan);
- __ Sbfx(r4,
- rhs_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r4, Operand(-1));
- __ b(ne, &neither_is_nan);
- __ mov(r4,
- Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(rhs_mantissa, Operand::Zero());
- __ b(eq, &neither_is_nan);
-
- __ bind(&one_is_nan);
- // NaN comparisons always fail.
- // Load whatever we need in r0 to make the comparison fail.
- if (cond == lt || cond == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
-
- __ bind(&neither_is_nan);
-}
-
-
-// See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
- Condition cond) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
-
- // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
- if (cond == eq) {
- // Doubles are not equal unless they have the same bit pattern.
- // Exception: 0 and -0.
- __ cmp(rhs_mantissa, Operand(lhs_mantissa));
- __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
- // Return non-zero if the numbers are unequal.
- __ Ret(ne);
-
- __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
- // If exponents are equal then return 0.
- __ Ret(eq);
-
- // Exponents are unequal. The only way we can return that the numbers
- // are equal is if one is -0 and the other is 0. We already dealt
- // with the case where both are -0 or both are 0.
- // We start by seeing if the mantissas (which are equal) or the bottom
- // 31 bits of the lhs exponent are non-zero. If so we return not
- // equal.
- __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
- __ mov(r0, Operand(r4), LeaveCC, ne);
- __ Ret(ne);
- // Now they are equal if and only if the rhs exponent is zero in its
- // low 31 bits.
- __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
- __ Ret();
- } else {
- // Call a native function to do a comparison between two non-NaNs.
- // Call C routine that may not cause GC or other trouble.
- __ push(lr);
- __ PrepareCallCFunction(0, 2, r5);
- if (masm->use_eabi_hardfloat()) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(d0, r0, r1);
- __ vmov(d1, r2, r3);
- }
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
- 0, 2);
- __ pop(pc); // Return.
- }
-}
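-
-
-// The bit-level equality rule implemented above, in plain C++: two non-NaN
-// doubles are equal iff their bit patterns match, with the single exception
-// that +0 and -0 compare equal. Illustrative helper, not V8 API:
-inline bool NonNanDoubleWordsEqualSketch(uint32_t lhs_exponent,
-                                         uint32_t lhs_mantissa,
-                                         uint32_t rhs_exponent,
-                                         uint32_t rhs_mantissa) {
-  if (lhs_exponent == rhs_exponent && lhs_mantissa == rhs_mantissa) {
-    return true;  // Identical bit patterns.
-  }
-  // Otherwise equal only when one is +0 and the other -0: both mantissas are
-  // zero and the exponent words are zero apart from the sign bit.
-  const uint32_t kSign = 0x80000000u;
-  return lhs_mantissa == 0 && rhs_mantissa == 0 &&
-         (lhs_exponent & ~kSign) == 0 && (rhs_exponent & ~kSign) == 0;
-}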
-
-
-// See comment at call site.
-static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- // If either operand is a JS object or an oddball value, then they are
- // not equal since their pointers are different.
- // There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
- Label first_non_object;
- // Get the type of the first operand into r2 and compare it with
- // FIRST_SPEC_OBJECT_TYPE.
- __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, &first_non_object);
-
- // Return non-zero (r0 is not zero).
- Label return_not_equal;
- __ bind(&return_not_equal);
- __ Ret();
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ cmp(r2, Operand(ODDBALL_TYPE));
- __ b(eq, &return_not_equal);
-
- __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ cmp(r3, Operand(ODDBALL_TYPE));
- __ b(eq, &return_not_equal);
-
- // Now that we have the types we might as well check for
- // internalized-internalized.
- // Ensure that no non-strings have the internalized bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(r2, r2, Operand(r3));
- __ tst(r2, Operand(kIsInternalizedMask));
- __ b(ne, &return_not_equal);
-}
-
-
-// See comment at call site.
-static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* both_loaded_as_doubles,
- Label* not_heap_numbers,
- Label* slow) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
- __ b(ne, not_heap_numbers);
- __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ cmp(r2, r3);
- __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
-
- // Both are heap numbers. Load them up then jump to the code we have
- // for that.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- }
- __ jmp(both_loaded_as_doubles);
-}
-
-
-// Fast negative check for internalized-to-internalized equality.
-static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* possible_strings,
- Label* not_both_strings) {
- ASSERT((lhs.is(r0) && rhs.is(r1)) ||
- (lhs.is(r1) && rhs.is(r0)));
-
- // r2 is object type of rhs.
- // Ensure that no non-strings have the internalized bit set.
- Label object_test;
- STATIC_ASSERT(kInternalizedTag != 0);
- __ tst(r2, Operand(kIsNotStringMask));
- __ b(ne, &object_test);
- __ tst(r2, Operand(kIsInternalizedMask));
- __ b(eq, possible_strings);
- __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, not_both_strings);
- __ tst(r3, Operand(kIsInternalizedMask));
- __ b(eq, possible_strings);
-
- // Both are internalized. We already checked they weren't the same pointer
- // so they are not equal.
- __ mov(r0, Operand(NOT_EQUAL));
- __ Ret();
-
- __ bind(&object_test);
- __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(lt, not_both_strings);
- __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, not_both_strings);
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
- __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ and_(r0, r2, Operand(r3));
- __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ Ret();
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
- __ sub(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- __ eor(scratch1, scratch1, Operand(scratch2));
- __ and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- __ ldr(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ sub(scratch2, object, Operand(kHeapObjectTag));
- __ vldr(d0, scratch2, HeapNumber::kValueOffset);
- __ sub(probe, probe, Operand(kHeapObjectTag));
- __ vldr(d1, probe, HeapNumber::kValueOffset);
- __ VFPCompareAndSetFlags(d0, d1);
- __ b(ne, not_found); // The cache did not contain this value.
- __ b(&load_result_from_cache);
- } else {
- __ b(not_found);
- }
- }
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ and_(scratch, mask, Operand(object, ASR, 1));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch,
- number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- Register probe = mask;
- __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ cmp(object, probe);
- __ b(ne, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ ldr(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ ldr(r1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ Ret();
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
- Register scratch,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
- DONT_DO_SMI_CHECK);
- }
- // We could be strict about internalized/non-internalized here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-// On entry r1 and r2 are the values to be compared.
-// On exit r0 is 0, positive or negative to indicate the result of
-// the comparison.
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
- Register lhs = r1;
- Register rhs = r0;
- Condition cc = GetCondition();
-
- Label miss;
- ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
-
- Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles, lhs_not_nan;
-
- Label not_two_smis, smi_done;
- __ orr(r2, r1, r0);
- __ JumpIfNotSmi(r2, &not_two_smis);
- __ mov(r1, Operand(r1, ASR, 1));
- __ sub(r0, r1, Operand(r0, ASR, 1));
- __ Ret();
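- // (Sketch: with both operands untagged, lhs - rhs is negative, zero or
- // positive exactly as the comparison result requires, and the
- // subtraction cannot overflow because untagged smis fit in 31 bits.)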
- __ bind(&not_two_smis);
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- {
- Label not_user_equal, user_equal;
- __ and_(r2, r1, Operand(r0));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &not_user_equal);
-
- __ CompareObjectType(r0, r2, r4, JS_OBJECT_TYPE);
- __ b(ne, &not_user_equal);
-
- __ CompareObjectType(r1, r3, r4, JS_OBJECT_TYPE);
- __ b(ne, &not_user_equal);
-
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &user_equal);
-
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitField2Offset));
- __ and_(r3, r3, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r3, Operand(1 << Map::kUseUserObjectComparison));
- __ b(ne, &not_user_equal);
-
- __ bind(&user_equal);
-
- __ Push(r0, r1);
- __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
- __ bind(&not_user_equal);
- }
-
-
- // Handle the case where the objects are identical. Either returns the answer
- // or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc);
-
- // If either is a Smi (we know that not both are), then they can only
- // be strictly equal if the other is a HeapNumber.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs, Operand(rhs));
- __ JumpIfNotSmi(r2, &not_smis);
- // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
- // 1) Return the answer.
- // 2) Go to slow.
- // 3) Fall through to both_loaded_as_doubles.
- // 4) Jump to lhs_not_nan.
- // In cases 3 and 4 we have found out we were dealing with a number-number
- // comparison. If VFP2 is supported, the double values of the numbers
- // have been loaded into d7 and d6. Otherwise, the double values have
- // been loaded into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
-
- __ bind(&both_loaded_as_doubles);
- // The arguments have been converted to doubles and stored in d6 and d7,
- // if VFP2 is supported, or in r0, r1, r2, and r3.
- Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(VFP2)) {
- __ bind(&lhs_not_nan);
- CpuFeatures::Scope scope(VFP2);
- Label no_nan;
- // VFP instructions to implement double precision comparison.
- __ VFPCompareAndSetFlags(d7, d6);
- Label nan;
- __ b(vs, &nan);
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
-
- __ bind(&nan);
- // If one of the sides was a NaN then the v flag is set. Load r0 with
- // whatever it takes to make the comparison fail, since comparisons with NaN
- // always fail.
- if (cc == lt || cc == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
- } else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc);
- }
-
- __ bind(&not_smis);
- // At this point we know we are dealing with two different objects,
- // and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict()) {
- // This returns non-equal for some object types, or falls through if it
- // was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
- }
-
- Label check_for_internalized_strings;
- Label flat_string_check;
- // Check for heap-number-heap-number comparison. Can jump to slow case,
- // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to
- // check_for_internalized_strings.
- // In this case r2 will contain the type of rhs_. Never falls through.
- EmitCheckForTwoHeapNumbers(masm,
- lhs,
- rhs,
- &both_loaded_as_doubles,
- &check_for_internalized_strings,
- &flat_string_check);
-
- __ bind(&check_for_internalized_strings);
- // In the strict case, EmitStrictTwoHeapObjectCompare already took care
- // of internalized strings.
- if (cc == eq && !strict()) {
- // Returns an answer for two internalized strings or two detectable objects.
- // Otherwise jumps to string case or not both strings case.
- // Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForInternalizedStringsOrObjects(
- masm, lhs, rhs, &flat_string_check, &slow);
- }
-
- // Check for both being sequential ASCII strings, and inline if that is the
- // case.
- __ bind(&flat_string_check);
-
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
-
- __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
- if (cc == eq) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs,
- rhs,
- r2,
- r3,
- r4);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs,
- rhs,
- r2,
- r3,
- r4,
- r5);
- }
- // Never falls through to here.
-
- __ bind(&slow);
-
- __ Push(lhs, rhs);
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- if (cc == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- native = Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc == lt || cc == le) {
- ncr = GREATER;
- } else {
- ASSERT(cc == gt || cc == ge); // remaining cases
- ncr = LESS;
- }
- __ mov(r0, Operand(Smi::FromInt(ncr)));
- __ push(r0);
- }
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-// The stub expects its argument in the tos_ register and returns its result in
-// it, too: zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- Label patch;
- const Register map = r9.is(tos_) ? r7 : r9;
- const Register temp = map;
-
- // undefined -> false.
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value.
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- __ tst(tos_, Operand(kSmiTagMask));
- // tos_ contains the correct return value already
- __ Ret(eq);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(tos_, &patch);
- }
-
- if (types_.NeedsMap()) {
- __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- __ mov(tos_, Operand::Zero(), LeaveCC, ne);
- __ Ret(ne);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // Spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
- // tos_ contains the correct non-zero return value already.
- __ Ret(ge);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
- __ Ret(lt); // the string length is OK as the return value
- }
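- // (A string converts to true iff it is non-empty, and its smi length is
- // the zero word exactly when it is empty, so the length itself serves
- // as the stub's zero/non-zero result above.)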
-
- if (types_.Contains(HEAP_NUMBER)) {
- // Heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &not_heap_number);
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
-
- __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(d1, 0.0);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN
- } else {
- Label done, not_nan, not_zero;
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
- // -0 maps to false:
- __ bic(
- temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE32), SetCC);
- __ b(ne, &not_zero);
- // If exponent word is zero then the answer depends on the mantissa word.
- __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
- __ jmp(&done);
-
- // Check for NaN.
- __ bind(&not_zero);
- // We already zeroed the sign bit, now shift out the mantissa so we only
- // have the exponent left.
- __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
- unsigned int shifted_exponent_mask =
- HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
- __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32));
- __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
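- // (Illustrative: an IEEE-754 double is a NaN iff its exponent field is
- // all ones (0x7ff) and its mantissa is non-zero; the code checks the
- // mantissa bits split across the exponent word and the mantissa word.)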
-
- // Reload exponent word.
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
- __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32));
- // If mantissa is not zero then we have a NaN, so return 0.
- __ mov(tos_, Operand::Zero(), LeaveCC, ne);
- __ b(ne, &done);
-
- // Load mantissa word.
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
- __ cmp(temp, Operand::Zero());
- // If mantissa is not zero then we have a NaN, so return 0.
- __ mov(tos_, Operand::Zero(), LeaveCC, ne);
- __ b(ne, &done);
-
- __ bind(&not_nan);
- __ mov(tos_, Operand(1, RelocInfo::NONE32));
- __ bind(&done);
- }
- __ Ret();
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value in tos_.
- __ LoadRoot(ip, value);
- __ cmp(tos_, ip);
- // The value of a root is never NULL, so we can avoid loading a non-null
- // value into tos_ when we want to return 'true'.
- if (!result) {
- __ mov(tos_, Operand::Zero(), LeaveCC, eq);
- }
- __ Ret(eq);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- if (!tos_.is(r3)) {
- __ mov(r3, Operand(tos_));
- }
- __ mov(r2, Operand(Smi::FromInt(tos_.code())));
- __ mov(r1, Operand(Smi::FromInt(types_.ToByte())));
- __ Push(r3, r2, r1);
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ stm(db_w, sp, kCallerSaved | lr.bit());
-
- const Register scratch = r1;
-
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP2);
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(scratch);
-
- __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
- }
- }
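- // (The stores above use the al condition for d0-d15 and ne for d16-d31,
- // so the upper bank is only saved when CheckFor32DRegs left the flags
- // indicating that the CPU actually has 32 D-registers.)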
- const int argument_count = 1;
- const int fp_argument_count = 0;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
- __ mov(r0, Operand(ExternalReference::isolate_address()));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- argument_count);
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(VFP2);
-
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(scratch);
-
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
- }
- __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
- }
- __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
-}
-
-
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ mov(r3, Operand(r0)); // the operand
- __ mov(r2, Operand(Smi::FromInt(op_)));
- __ mov(r1, Operand(Smi::FromInt(mode_)));
- __ mov(r0, Operand(Smi::FromInt(operand_type_)));
- __ Push(r3, r2, r1, r0);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow) {
- __ JumpIfNotSmi(r0, non_smi);
-
- // The result of negating zero or the smallest negative smi is not a smi.
- __ bic(ip, r0, Operand(0x80000000), SetCC);
- __ b(eq, slow);
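- // (Illustrative: the bic clears bit 31, so the eq branch is taken
- // exactly for the tagged words 0 and 0x80000000, i.e. the smis 0 and
- // -2^30; negating them yields -0 and 2^30, neither representable as a
- // smi.)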
-
- // Return '0 - value'.
- __ rsb(r0, r0, Operand::Zero());
- __ Ret();
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi) {
- __ JumpIfNotSmi(r0, non_smi);
-
- // Flip bits and revert inverted smi-tag.
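- // (Sketch: for a tagged smi s == 2 * n, ~s == 2 * ~n + 1, so clearing
- // the low tag bit afterwards leaves the tagged value of ~n.)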
- __ mvn(r0, Operand(r0));
- __ bic(r0, r0, Operand(kSmiTagMask));
- __ Ret();
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
- Label non_smi, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (mode_ == UNARY_OVERWRITE) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
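- // (Negating an IEEE-754 double only toggles the sign bit, which lives
- // in the high ("exponent") word, so the mantissa word is untouched.)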
- } else {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r1, Operand(r0));
- __ pop(r0);
- }
-
- __ bind(&heapnumber_allocated);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
- }
- __ Ret();
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(
- MacroAssembler* masm, Label* slow) {
- Label impossible;
-
- EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
- // Convert the heap number in r0 to an untagged integer in r1.
- __ ConvertToInt32(r0, r1, r2, r3, d0, slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ mvn(r1, Operand(r1));
- __ add(r2, r1, Operand(0x40000000), SetCC);
- __ b(mi, &try_float);
-
- // Tag the result as a smi and we're done.
- __ mov(r0, Operand(r1, LSL, kSmiTagSize));
- __ Ret();
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- // Allocate a new heap number without zapping r0, which we need if it fails.
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0); // Push the heap number, not the untagged int32.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r2, r0); // Move the new heap number into r2.
- // Get the heap number into r0, now that the new heap number is in r2.
- __ pop(r0);
- }
-
- // Convert the heap number in r0 to an untagged integer in r1.
- // This can't go slow-case because it's the same number we already
- // converted successfully once before.
- __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
- __ mvn(r1, Operand(r1));
-
- __ bind(&heapnumber_allocated);
- __ mov(r0, r2); // Move newly allocated heap number to r0.
- }
-
- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatures::Scope scope(VFP2);
- __ vmov(s0, r1);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- }
-
- __ bind(&impossible);
- if (FLAG_debug_code) {
- __ stop("Incorrect assumption in bit-not stub");
- }
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the JavaScript builtin.
- __ push(r0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(VFP2);
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(r1, r0);
-
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ push(r2);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
-
- ASSERT(right.is(r0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op) {
- case Token::ADD:
- __ add(right, left, Operand(right), SetCC); // Add optimistically.
- __ Ret(vc);
- __ sub(right, right, Operand(left)); // Revert optimistic add.
- break;
- case Token::SUB:
- __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
- __ Ret(vc);
- __ sub(right, left, Operand(right)); // Revert optimistic subtract.
- break;
- case Token::MUL:
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(ip, right);
- // Do multiplication
- // scratch1 = lower 32 bits of ip * left.
- // scratch2 = higher 32 bits of ip * left.
- __ smull(scratch1, scratch2, left, ip);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &not_smi_result);
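- // (Illustrative: the 64-bit product hi:lo fits in 32 bits exactly when
- // hi == lo >> 31 (arithmetic shift), i.e. when the top 33 bits are all
- // copies of the sign bit of lo.)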
- // Go slow on zero result to handle -0.
- __ cmp(scratch1, Operand::Zero());
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ Ret(ne);
- // We need to return -0 if the zero result came from multiplying a
- // negative number by 0. We know one of the operands was zero.
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ Ret(pl); // Return smi 0 if the non-zero one was positive.
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- break;
- case Token::DIV: {
- Label div_with_sdiv;
-
- // Check for 0 divisor.
- __ cmp(right, Operand::Zero());
- __ b(eq, &not_smi_result);
-
- // Check for power of two on the right hand side.
- __ sub(scratch1, right, Operand(1));
- __ tst(scratch1, right);
- if (CpuFeatures::IsSupported(SUDIV)) {
- __ b(ne, &div_with_sdiv);
- // Check for no remainder.
- __ tst(left, scratch1);
- __ b(ne, &not_smi_result);
- // Check for positive left hand side.
- __ cmp(left, Operand::Zero());
- __ b(mi, &div_with_sdiv);
- } else {
- __ b(ne, &not_smi_result);
- // Check for positive and no remainder.
- __ orr(scratch2, scratch1, Operand(0x80000000u));
- __ tst(left, scratch2);
- __ b(ne, &not_smi_result);
- }
-
- // Perform division by shifting.
- __ CountLeadingZeros(scratch1, scratch1, scratch2);
- __ rsb(scratch1, scratch1, Operand(31));
- __ mov(right, Operand(left, LSR, scratch1));
- __ Ret();
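- // (Sketch: for an exact division by a power of two, scratch1 above ends
- // up holding log2 of the untagged divisor, and shifting the still-tagged
- // dividend right by that amount yields the tagged quotient directly.)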
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- Label result_not_zero;
-
- __ bind(&div_with_sdiv);
- // Do division.
- __ sdiv(scratch1, left, right);
- // Check that the remainder is zero.
- __ mls(scratch2, scratch1, right, left);
- __ cmp(scratch2, Operand::Zero());
- __ b(ne, &not_smi_result);
- // Check for negative zero result.
- __ cmp(scratch1, Operand::Zero());
- __ b(ne, &result_not_zero);
- __ cmp(right, Operand::Zero());
- __ b(lt, &not_smi_result);
- __ bind(&result_not_zero);
- // Check for the corner case of dividing the most negative smi by -1.
- __ cmp(scratch1, Operand(0x40000000));
- __ b(eq, &not_smi_result);
- // Tag and return the result.
- __ SmiTag(right, scratch1);
- __ Ret();
- }
- break;
- }
- case Token::MOD: {
- Label modulo_with_sdiv;
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- // Check for x % 0.
- __ cmp(right, Operand::Zero());
- __ b(eq, &not_smi_result);
-
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u));
- __ b(ne, &modulo_with_sdiv);
-
- // Check for power of two on the right hand side.
- __ sub(scratch1, right, Operand(1));
- __ tst(scratch1, right);
- __ b(ne, &modulo_with_sdiv);
- } else {
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u));
- __ b(ne, &not_smi_result);
-
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
- }
-
- // Perform modulus by masking (scratch1 contains right - 1).
- __ and_(right, left, Operand(scratch1));
- __ Ret();
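- // (Sketch: for right == 2^k both operands are known non-negative here,
- // so masking the tagged left value with right - 1 keeps the tag bit and
- // the low k value bits, which is exactly the tagged left % right.)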
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- __ bind(&modulo_with_sdiv);
- __ mov(scratch2, right);
- // Perform modulus with sdiv and mls.
- __ sdiv(scratch1, left, right);
- __ mls(right, scratch1, right, left);
- // Return if the result is not 0.
- __ cmp(right, Operand::Zero());
- __ Ret(ne);
- // The result is 0, check for -0 case.
- __ cmp(left, Operand::Zero());
- __ Ret(pl);
- // This is a -0 case, restore the value of right.
- __ mov(right, scratch2);
- // We fall through here to not_smi_result to produce -0.
- }
- break;
- }
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- __ Ret();
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- __ Ret();
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- // Smi tag result.
- __ bic(right, right, Operand(kSmiTagMask));
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &not_smi_result);
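- // (Illustrative: 0xc0000000 tests the two top bits; if either is set
- // the unsigned result is >= 2^30 and cannot be re-tagged as a
- // non-negative smi.)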
- // Smi tag result.
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- // Check that the signed result fits in a Smi.
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- __ b(mi, &not_smi_result);
- __ SmiTag(right, scratch1);
- __ Ret();
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode) {
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
- Register scratch3 = r4;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands) {
- __ AssertSmi(left);
- __ AssertSmi(right);
- }
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
- // depending on whether VFP3 is available or not.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP2) &&
- op != Token::MOD ?
- FloatingPointHelper::kVFPRegisters :
- FloatingPointHelper::kCoreRegisters;
-
- // Allocate new heap number for result.
- Register result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
-
- // Load the operands.
- if (smi_operands) {
- FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
- } else {
- // Load right operand to d7 or r2/r3.
- if (right_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, right, destination, d7, d8, r2, r3, heap_number_map,
- scratch1, scratch2, s0, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, right, d7, r2, r3, heap_number_map,
- scratch1, scratch2, fail);
- }
- // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it
- // jumps to |miss|.
- if (left_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, left, destination, d6, d8, r0, r1, heap_number_map,
- scratch1, scratch2, s0, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, left, d6, r0, r1, heap_number_map,
- scratch1, scratch2, fail);
- }
- }
-
- // Calculate the result.
- if (destination == FloatingPointHelper::kVFPRegisters) {
- // Using VFP registers:
- // d6: Left value
- // d7: Right value
- CpuFeatures::Scope scope(VFP2);
- switch (op) {
- case Token::ADD:
- __ vadd(d5, d6, d7);
- break;
- case Token::SUB:
- __ vsub(d5, d6, d7);
- break;
- case Token::MUL:
- __ vmul(d5, d6, d7);
- break;
- case Token::DIV:
- __ vdiv(d5, d6, d7);
- break;
- default:
- UNREACHABLE();
- }
-
- __ sub(r0, result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ add(r0, r0, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op,
- result,
- scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(r3, left);
- __ SmiUntag(r2, right);
- } else {
- // Convert operands to 32-bit integers. Right in r2 and left in r3.
- FloatingPointHelper::ConvertNumberToInt32(masm,
- left,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- not_numbers);
- FloatingPointHelper::ConvertNumberToInt32(masm,
- right,
- r2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- not_numbers);
- }
-
- Label result_not_a_smi;
- switch (op) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int, so we go to the slow case
- // if we hit this situation.
- if (CpuFeatures::IsSupported(VFP2)) {
- __ b(mi, &result_not_a_smi);
- } else {
- __ b(mi, not_numbers);
- }
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(r2, r2, 5);
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check that the *signed* result fits in a smi.
- __ add(r3, r2, Operand(0x40000000), SetCC);
- __ b(mi, &result_not_a_smi);
- __ SmiTag(r0, r2);
- __ Ret();
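- // (Illustrative: a signed 32-bit value v fits in a smi iff
- // -2^30 <= v < 2^30, which is equivalent to v + 0x40000000 being
- // non-negative, so the mi branch above means "does not fit".)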
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = r5;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
- }
-
- // r2: Answer as signed int32.
- // r5: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to r0, which is the
- // result.
- __ mov(r0, Operand(r5));
-
- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
- // mentioned above SHR needs to always produce a positive result.
- CpuFeatures::Scope scope(VFP2);
- __ vmov(s0, r2);
- if (op == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// Generate the smi code. If the operation on smis is successful this return
-// is generated. If the result is not a smi and heap number allocation is not
-// requested, the code falls through. If heap number allocation is requested
-// but a heap number cannot be allocated, the code jumps to the label
-// gc_required.
-void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
- Label not_smis;
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(scratch1, &not_smis);
-
- // If the smi-smi operation results in a smi, the return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
-
- // If heap number results are possible, generate the result in an
- // allocated heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
- }
- __ bind(&not_smis);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label not_smis, call_runtime;
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- Register left = r1;
- Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
- DwVfpRegister double_scratch = d0;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both labels are currently bound to the same position. We use two
- // different labels to differentiate the cause leading to the type
- // transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ orr(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers r0 and r1 (right
- // and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
- ? FloatingPointHelper::kVFPRegisters
- : FloatingPointHelper::kCoreRegisters;
-
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- d7,
- d8,
- r2,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- d6,
- d8,
- r4,
- r5,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
-
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP2);
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ vadd(d5, d6, d7);
- break;
- case Token::SUB:
- __ vsub(d5, d6, d7);
- break;
- case Token::MUL:
- __ vmul(d5, d6, d7);
- break;
- case Token::DIV:
- __ vdiv(d5, d6, d7);
- break;
- default:
- UNREACHABLE();
- }
-
- if (op_ != Token::DIV) {
- // These operations produce an integer result.
- // Try to return a smi if we can.
- // Otherwise return a heap number if allowed, or jump to type
- // transition.
-
- __ EmitVFPTruncate(kRoundToZero,
- scratch1,
- d5,
- scratch2,
- d8);
-
- if (result_type_ <= BinaryOpIC::INT32) {
- // If the ne condition is set, result does
- // not fit in a 32-bit integer.
- __ b(ne, &transition);
- }
-
- // Check if the result fits in a smi.
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- // If not try to return a heap number.
- __ b(mi, &return_heap_number);
- // Check for minus zero. Return heap number for minus zero.
- Label not_zero;
- __ cmp(scratch1, Operand::Zero());
- __ b(ne, &not_zero);
- __ vmov(scratch2, d5.high());
- __ tst(scratch2, Operand(HeapNumber::kSignMask));
- __ b(ne, &return_heap_number);
- __ bind(&not_zero);
-
- // Tag the result and return.
- __ SmiTag(r0, scratch1);
- __ Ret();
- } else {
- // DIV just falls through to allocating a heap number.
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::NUMBER
- : BinaryOpIC::INT32)) {
- // We are using vfp registers so r5 is available.
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(d5, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
- }
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- // We preserved r0 and r1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(r5, r4);
-
- Label pop_and_call_runtime;
-
- // Allocate a heap number to store the result.
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime,
- mode_);
-
- // Reload the left value from the stack.
- __ Pop(r1, r0);
-
- // Call the C function to handle the double operation.
- FloatingPointHelper::CallCCodeForDoubleOperation(
- masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
- __ b(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- Register scratch3 = r5;
- // Convert operands to 32-bit integers. Right in r2 and left in r3. The
- // registers r0 and r1 (right and left) are preserved for the runtime
- // call.
- FloatingPointHelper::LoadNumberAsInt32(masm,
- left,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- d1,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32(masm,
- right,
- r2,
- heap_number_map,
- scratch1,
- scratch2,
- scratch3,
- d0,
- d1,
- &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ orr(r2, r3, Operand(r2));
- break;
- case Token::BIT_XOR:
- __ eor(r2, r3, Operand(r2));
- break;
- case Token::BIT_AND:
- __ and_(r2, r3, Operand(r2));
- break;
- case Token::SAR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, ASR, r2));
- break;
- case Token::SHR:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSR, r2), SetCC);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (r2) is 0.
- // This result cannot be represented as a signed 32-bit integer, so try
- // to return a heap number if we can.
- // The non-VFP2 code does not support this special case, so jump to the
- // runtime if VFP2 is unavailable.
- if (CpuFeatures::IsSupported(VFP2)) {
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number);
- } else {
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &call_runtime);
- }
- break;
- case Token::SHL:
- __ and_(r2, r2, Operand(0x1f));
- __ mov(r2, Operand(r3, LSL, r2));
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi.
- __ add(scratch1, r2, Operand(0x40000000), SetCC);
- // If not try to return a heap number. (We know the result is an int32.)
- __ b(mi, &return_heap_number);
- // Tag the result and return.
- __ SmiTag(r0, r2);
- __ Ret();
-
- __ bind(&return_heap_number);
- heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_s32(double_scratch, double_scratch.low());
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_u32(double_scratch, double_scratch.low());
- }
-
- // Store the result.
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
- } else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- __ mov(r0, r5);
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
- }
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
- __ b(ne, &check);
- if (Token::IsBitOp(op_)) {
- __ mov(r1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &done);
- if (Token::IsBitOp(op_)) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime, transition;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime, transition;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
-
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = r1;
- Register right = r0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &left_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode) {
- // The code below clobbers result if allocation fails. To keep both
- // arguments intact for the runtime call, result cannot be one of them.
- ASSERT(!result.is(r0) && !result.is(r1));
-
- if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ b(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, Operand(overwritable_operand));
- __ bind(&allocated);
- } else {
- ASSERT(mode == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(r1, r0);
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Untagged case: double input in d2, double result goes
- // into d2.
- // Tagged case: tagged input on top of stack and in r0,
- // tagged result (heap number) goes into r0.
-
- Label input_not_smi;
- Label loaded;
- Label calculate;
- Label invalid_cache;
- const Register scratch0 = r9;
- const Register scratch1 = r7;
- const Register cache_entry = r0;
- const bool tagged = (argument_type_ == TAGGED);
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- if (tagged) {
- // Argument is a number and is on stack and in r0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(r0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into r2, r3.
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ b(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(r0,
- r1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Load it to a double register and store the
- // low and high words into r2, r3.
- __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(r2, r3, d0);
- } else {
- // Input is untagged double in d2. Output goes to d2.
- __ vmov(r2, r3, d2);
- }
- __ bind(&loaded);
- // r2 = low 32 bits of double value
- // r3 = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, ASR, 16));
- __ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
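- // (Sketch in plain C++ of the hash just computed:
- //   uint32_t h = lo ^ hi;
- //   h ^= h >> 16;
- //   h ^= h >> 8;
- //   h &= TranscendentalCache::SubCache::kCacheSize - 1;
- // arithmetic vs. logical shift makes no difference to the masked low
- // bits.)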
-
- // r2 = low 32 bits of double value.
- // r3 = high 32 bits of double value.
- // r1 = TranscendentalCache::hash(double value).
- Isolate* isolate = masm->isolate();
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(isolate);
- __ mov(cache_entry, Operand(cache_array));
- // cache_entry points to cache array.
- int cache_array_index
- = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
- __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
- // r0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(cache_entry, Operand::Zero());
- __ b(eq, &invalid_cache);
-
-#ifdef DEBUG
- // Check that the layout of cache elements matches expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
- __ add(r1, r1, Operand(r1, LSL, 1));
- __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
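- // (The two adds compute cache_entry + r1 * 12: first r1 += r1 << 1
- // gives 3 * r1, then shifting left by 2 scales it to 12 * r1, the size
- // of one cache element.)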
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
- __ cmp(r2, r4);
- __ cmp(r3, r5, eq);
- __ b(ne, &calculate);
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into r0.
- __ pop();
- __ mov(r0, Operand(r6));
- } else {
- // Load result into d2.
- __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- }
- __ Ret();
- } // if (CpuFeatures::IsSupported(VFP2))
-
- __ bind(&calculate);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_miss(), 1, scratch0, scratch1);
- if (tagged) {
- __ bind(&invalid_cache);
- ExternalReference runtime_function =
- ExternalReference(RuntimeFunction(), masm->isolate());
- __ TailCallExternalReference(runtime_function, 1, 1);
- } else {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatures::Scope scope(VFP2);
-
- Label no_update;
- Label skip_cache;
-
- // Call C function to calculate the result and update the cache.
- // r0: precalculated cache entry address.
- // r2 and r3: parts of the double value.
- // Store r0, r2 and r3 on stack for later before calling C function.
- __ Push(r3, r2, cache_entry);
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(d2);
-
- // Try to update the cache. If we cannot allocate a
- // heap number, we return the result without updating.
- __ Pop(r3, r2, cache_entry);
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
- __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
- __ Ret();
-
- __ bind(&invalid_cache);
- // The cache is invalid. Call runtime which will recreate the
- // cache.
- __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
- __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ Ret();
-
- __ bind(&skip_cache);
- // Call C function to calculate the result and answer directly
- // without updating the cache.
- GenerateCallCFunction(masm, scratch0);
- __ GetCFunctionDoubleResult(d2);
- __ bind(&no_update);
-
- // We return the value in d2 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Allocate an aligned object larger than a HeapNumber.
- ASSERT(4 * kPointerSize >= HeapNumber::kSize);
- __ mov(scratch0, Operand(4 * kPointerSize));
- __ push(scratch0);
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-}
-
-
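-// A host-side sketch of the cache-lookup arithmetic emitted above, for
-// reference only; TranscendentalCacheIndexSketch is a hypothetical
-// helper, not part of V8. It assumes kCacheSize is a power of two and,
-// as the debug check verifies, 12-byte cache elements (two uint32_t
-// inputs plus an output pointer), so &cache[h] is formed as
-// h * 12 = (h + 2 * h) * 4, i.e. two shifted adds.
-static inline uint32_t TranscendentalCacheIndexSketch(int32_t lo,
-                                                      int32_t hi,
-                                                      uint32_t cache_size) {
-  int32_t h = lo ^ hi;  // Mix the two halves of the double's bits.
-  h ^= h >> 16;         // Arithmetic shifts, matching the ASR above.
-  h ^= h >> 8;
-  return static_cast<uint32_t>(h) & (cache_size - 1);
-}
-
-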
-void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
- Register scratch) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- Isolate* isolate = masm->isolate();
-
- __ push(lr);
- __ PrepareCallCFunction(0, 1, scratch);
- if (masm->use_eabi_hardfloat()) {
- __ vmov(d0, d2);
- } else {
- __ vmov(r0, r1, d2);
- }
- AllowExternalCallThatCantCauseGC scope(masm);
- switch (type_) {
- case TranscendentalCache::SIN:
- __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::COS:
- __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::TAN:
- __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
- 0, 1);
- break;
- case TranscendentalCache::LOG:
- __ CallCFunction(ExternalReference::math_log_double_function(isolate),
- 0, 1);
- break;
- default:
- UNIMPLEMENTED();
- break;
- }
- __ pop(lr);
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
-void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatures::Scope vfp2_scope(VFP2);
- const Register base = r1;
- const Register exponent = r2;
- const Register heapnumbermap = r5;
- const Register heapnumber = r0;
- const DwVfpRegister double_base = d1;
- const DwVfpRegister double_exponent = d2;
- const DwVfpRegister double_result = d3;
- const DwVfpRegister double_scratch = d0;
- const SwVfpRegister single_scratch = s0;
- const Register scratch = r9;
- const Register scratch2 = r7;
-
- Label call_runtime, done, int_exponent;
- if (exponent_type_ == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack to double registers.
- __ ldr(base, MemOperand(sp, 1 * kPointerSize));
- __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
-
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
-
- __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
- __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
- __ cmp(scratch, heapnumbermap);
- __ b(ne, &call_runtime);
-
- __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent);
-
- __ bind(&base_is_smi);
- __ vmov(single_scratch, scratch);
- __ vcvt_f64_s32(double_base, single_scratch);
- __ bind(&unpack_exponent);
-
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
-
- __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
- __ cmp(scratch, heapnumbermap);
- __ b(ne, &call_runtime);
- __ vldr(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
- // Base is already in double_base.
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
-
- __ vldr(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type_ != INTEGER) {
- Label int_exponent_convert;
- // Detect integer exponents stored as double.
- __ vcvt_u32_f64(single_scratch, double_exponent);
- // We do not check for NaN or Infinity here because comparing numbers on
- // ARM correctly distinguishes NaNs. We end up calling the built-in.
- __ vcvt_f64_u32(double_scratch, single_scratch);
- __ VFPCompareAndSetFlags(double_scratch, double_exponent);
- __ b(eq, &int_exponent_convert);
-
- if (exponent_type_ == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label not_plus_half;
-
- // Test for 0.5.
- __ vmov(double_scratch, 0.5, scratch);
- __ VFPCompareAndSetFlags(double_exponent, double_scratch);
- __ b(ne, &not_plus_half);
-
- // Calculate the square root of the base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY, scratch);
- __ VFPCompareAndSetFlags(double_base, double_scratch);
- __ vneg(double_result, double_scratch, eq);
- __ b(eq, &done);
-
- // Add +0 to convert -0 to +0.
- __ vadd(double_scratch, double_base, kDoubleRegZero);
- __ vsqrt(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(&not_plus_half);
- __ vmov(double_scratch, -0.5, scratch);
- __ VFPCompareAndSetFlags(double_exponent, double_scratch);
- __ b(ne, &call_runtime);
-
- // Calculate the square root of the base. Check for the special case of
- // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY, scratch);
- __ VFPCompareAndSetFlags(double_base, double_scratch);
- __ vmov(double_result, kDoubleRegZero, eq);
- __ b(eq, &done);
-
- // Add +0 to convert -0 to +0.
- __ vadd(double_scratch, double_base, kDoubleRegZero);
- __ vmov(double_result, 1.0, scratch);
- __ vsqrt(double_scratch, double_scratch);
- __ vdiv(double_result, double_result, double_scratch);
- __ jmp(&done);
- }
-
- __ push(lr);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- }
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
- __ jmp(&done);
-
- __ bind(&int_exponent_convert);
- __ vcvt_u32_f64(single_scratch, double_exponent);
- __ vmov(scratch, single_scratch);
- }
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
-
- // Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type_ == INTEGER) {
- __ mov(scratch, exponent);
- } else {
- // Exponent has previously been stored into scratch as untagged integer.
- __ mov(exponent, scratch);
- }
- __ vmov(double_scratch, double_base); // Back up base.
- __ vmov(double_result, 1.0, scratch2);
-
- // Get absolute value of exponent.
- __ cmp(scratch, Operand::Zero());
- __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
- __ sub(scratch, scratch2, scratch, LeaveCC, mi);
-
- Label while_true;
- __ bind(&while_true);
- __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
- __ vmul(double_result, double_result, double_scratch, cs);
- __ vmul(double_scratch, double_scratch, double_scratch, ne);
- __ b(ne, &while_true);
-
- __ cmp(exponent, Operand::Zero());
- __ b(ge, &done);
- __ vmov(double_scratch, 1.0, scratch);
- __ vdiv(double_result, double_scratch, double_result);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ VFPCompareAndSetFlags(double_result, 0.0);
- __ b(ne, &done);
- // double_exponent may not contain the exponent value if the input was a
- // smi. We set it to the exponent value before bailing out.
- __ vmov(single_scratch, exponent);
- __ vcvt_f64_s32(double_exponent, single_scratch);
-
- // Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
- if (exponent_type_ == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in exponent.
- __ bind(&done);
- __ AllocateHeapNumber(
- heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
- __ vstr(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- ASSERT(heapnumber.is(r0));
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
- __ Ret(2);
- } else {
- __ push(lr);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0, 2);
- }
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
-
- __ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
- __ Ret();
- }
-}
-
-
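-// The integer-exponent loop above is binary exponentiation (square and
-// multiply). A minimal host-side equivalent, as a hypothetical helper
-// rather than V8 API; the generated code additionally bails out to the
-// C function when a negative exponent yields 0.0, to get subnormal
-// results right:
-static double PowIntSketch(double base, int exponent) {
-  double result = 1.0;
-  double b = base;                   // Backed-up base (double_scratch).
-  int e = exponent < 0 ? -exponent : exponent;
-  while (e != 0) {
-    if (e & 1) result *= b;          // vmul on carry from the shift.
-    b *= b;                          // Square for the next bit.
-    e >>= 1;                         // mov(scratch, ASR 1, SetCC).
-  }
-  return exponent < 0 ? 1.0 / result : result;
-}
-
-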
-bool CEntryStub::NeedsImmovableCode() {
- return true;
-}
-
-
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- CEntryStub::GenerateAheadOfTime(isolate);
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
-}
-
-
-void CodeStub::GenerateFPStubs(Isolate* isolate) {
- SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
- ? kSaveFPRegs
- : kDontSaveFPRegs;
- CEntryStub save_doubles(1, mode);
- StoreBufferOverflowStub stub(mode);
- // These stubs might already be in the snapshot; detect that and don't
- // regenerate them, since regenerating would leave the code stub
- // initialization state inconsistent.
- Code* save_doubles_code = NULL;
- Code* store_buffer_overflow_code = NULL;
- if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope2(VFP2);
- save_doubles_code = *save_doubles.GetCode(isolate);
- store_buffer_overflow_code = *stub.GetCode(isolate);
- } else {
- save_doubles_code = *save_doubles.GetCode(isolate);
- store_buffer_overflow_code = *stub.GetCode(isolate);
- }
- save_doubles_code->set_is_pregenerated(true);
- store_buffer_overflow_code->set_is_pregenerated(true);
- }
- ISOLATE->set_fp_stubs_generated(true);
-}
-
-
-void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
-}
-
-
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, value, Operand(0xf));
- __ cmp(scratch, Operand(0xf));
- __ b(eq, oom_label);
-}
-
-
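-// Sketch of the encoding JumpIfOOM relies on: a Failure is a tagged
-// word with kFailureTag (0b11) in its low two bits and the failure
-// type in the next two, so OUT_OF_MEMORY_EXCEPTION == 3 makes the low
-// nibble 0xf. Hypothetical helper mirroring the masked compare above:
-static inline bool IsOutOfMemorySketch(int32_t value) {
-  return (value & 0xf) == 0xf;  // and_(scratch, value, 0xf); cmp; b eq.
-}
-
-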
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate) {
- // r0: result parameter for PerformGC, if any
- // r4: number of arguments including receiver (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to the first argument (C callee-saved)
- Isolate* isolate = masm->isolate();
-
- if (do_gc) {
- // Passing r0.
- __ PrepareCallCFunction(1, 0, r1);
- __ CallCFunction(ExternalReference::perform_gc_function(isolate),
- 1, 0);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(isolate);
- if (always_allocate) {
- __ mov(r0, Operand(scope_depth));
- __ ldr(r1, MemOperand(r0));
- __ add(r1, r1, Operand(1));
- __ str(r1, MemOperand(r0));
- }
-
- // Call C built-in.
- // r0 = argc, r1 = argv
- __ mov(r0, Operand(r4));
- __ mov(r1, Operand(r6));
-
-#if defined(V8_HOST_ARCH_ARM)
- int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (FLAG_debug_code) {
- if (frame_alignment > kPointerSize) {
- Label alignment_as_expected;
- ASSERT(IsPowerOf2(frame_alignment));
- __ tst(sp, Operand(frame_alignment_mask));
- __ b(eq, &alignment_as_expected);
- // Don't use Check here, as it will call Runtime_Abort, re-entering this code.
- __ stop("Unexpected alignment");
- __ bind(&alignment_as_expected);
- }
- }
-#endif
-
- __ mov(r2, Operand(ExternalReference::isolate_address()));
-
- // To let the GC traverse the return address of the exit frames, we need to
- // know where the return address is. The CEntryStub is unmovable, so
- // we can store the address on the stack so that it can be found again, and
- // we never have to restore it, because it will not change.
- // Compute the return address in lr to return to after the jump below. Pc is
- // already at '+ 8' from the current instruction but return is after three
- // instructions so add another 4 to pc to get the return address.
- {
- // Prevent literal pool emission before return address.
- Assembler::BlockConstPoolScope block_const_pool(masm);
- masm->add(lr, pc, Operand(4));
- __ str(lr, MemOperand(sp, 0));
- masm->Jump(r5);
- }
-
- if (always_allocate) {
- // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
- // though (contain the result).
- __ mov(r2, Operand(scope_depth));
- __ ldr(r3, MemOperand(r2));
- __ sub(r3, r3, Operand(1));
- __ str(r3, MemOperand(r2));
- }
-
- // check for failure result
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- // Lower 2 bits of r2 are 0 iff r0 has failure tag.
- __ add(r2, r0, Operand(1));
- __ tst(r2, Operand(kFailureTagMask));
- __ b(eq, &failure_returned);
-
- // Exit C frame and return.
- // r0:r1: result
- // sp: stack pointer
- // fp: frame pointer
- // Callee-saved register r4 still holds argc.
- __ LeaveExitFrame(save_doubles_, r4);
- __ mov(pc, lr);
-
- // check if we should retry or throw exception
- Label retry;
- __ bind(&failure_returned);
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ b(eq, &retry);
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- __ mov(r3, Operand(isolate->factory()->the_hole_value()));
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ ldr(r0, MemOperand(ip));
- __ str(r3, MemOperand(ip));
-
- // Special handling of termination exceptions which are uncatchable
- // by JavaScript code.
- __ cmp(r0, Operand(isolate->factory()->termination_exception()));
- __ b(eq, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
-}
-
-
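-// Sketch of the failure tests in GenerateCore: failure objects carry
-// kFailureTag == 3 in their low two bits, so adding 1 clears exactly
-// those bits, and RETRY_AFTER_GC == 0 means a retryable failure also
-// has no type bits set. Hypothetical helpers, not V8 API:
-static inline bool HasFailureTagSketch(int32_t result) {
-  return ((result + 1) & kFailureTagMask) == 0;  // add; tst; b eq.
-}
-static inline bool IsRetryAfterGCSketch(int32_t failure) {
-  const int kTypeMask =
-      ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize;
-  return (failure & kTypeMask) == 0;             // tst; b eq -> retry.
-}
-
-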
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // r0: number of arguments including receiver
- // r1: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- // Result returned in r0 or r0+r1 by default.
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Compute the argv pointer in a callee-saved register.
- __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ sub(r6, r6, Operand(kPointerSize));
-
- // Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles_);
-
- // Set up argc and the builtin function in callee-saved registers.
- __ mov(r4, Operand(r0));
- __ mov(r5, Operand(r1));
-
- // r4: number of arguments (C callee-saved)
- // r5: pointer to builtin function (C callee-saved)
- // r6: pointer to first argument (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(r0, Operand(false, RelocInfo::NONE32));
- __ mov(r2, Operand(external_caught));
- __ str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Label already_have_failure;
- JumpIfOOM(masm, r0, ip, &already_have_failure);
- Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
- __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ bind(&already_have_failure);
- __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ str(r0, MemOperand(r2));
- // Fall through to the next label.
-
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(r0);
-
- __ bind(&throw_normal_exception);
- __ Throw(r0);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // [sp+0]: argv
-
- Label invoke, handler_entry, exit;
-
- // Called from C, so do not pop argc and args on exit (preserve sp)
- // No need to save register-passed args
- // Save callee-saved registers (incl. cp and fp), sp, and lr
- __ stm(db_w, sp, kCalleeSaved | lr.bit());
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Save callee-saved vfp registers.
- __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
- // Set up the reserved register for 0.0.
- __ vmov(kDoubleRegZero, 0.0);
- }
-
- // Get address of argv, see stm above.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
-
- // Set up argv in r4.
- int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- if (CpuFeatures::IsSupported(VFP2)) {
- offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
- }
- __ ldr(r4, MemOperand(sp, offset_to_argv));
-
- // Push a frame with special values setup to mark it as an entry frame.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- Isolate* isolate = masm->isolate();
- __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ mov(r7, Operand(Smi::FromInt(marker)));
- __ mov(r6, Operand(Smi::FromInt(marker)));
- __ mov(r5,
- Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
- __ ldr(r5, MemOperand(r5));
- __ Push(r8, r7, r6, r5);
-
- // Set up frame pointer for the frame to be pushed.
- __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // If this is the outermost JS call, set js_entry_sp value.
- Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
- __ mov(r5, Operand(ExternalReference(js_entry_sp)));
- __ ldr(r6, MemOperand(r5));
- __ cmp(r6, Operand::Zero());
- __ b(ne, &non_outermost_js);
- __ str(fp, MemOperand(r5));
- __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- Label cont;
- __ b(&cont);
- __ bind(&non_outermost_js);
- __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
- __ bind(&cont);
- __ push(ip);
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
-
- // Block literal pool emission whilst taking the position of the handler
- // entry. This avoids making the assumption that literal pools are always
- // emitted after an instruction is emitted, rather than before.
- {
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
- // signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- }
- __ str(r0, MemOperand(ip));
- __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
- __ b(&exit);
-
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
- __ bind(&invoke);
- // Must preserve r0-r4, r5-r7 are available.
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bl(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Clear any pending exceptions.
- __ mov(r5, Operand(isolate->factory()->the_hole_value()));
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ str(r5, MemOperand(ip));
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Expected registers by Builtins::JSEntryTrampoline
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- if (is_construct) {
- ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
- __ mov(ip, Operand(construct_entry));
- } else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
- __ mov(ip, Operand(entry));
- }
- __ ldr(ip, MemOperand(ip)); // deref address
-
- // Branch and link to JSEntryTrampoline. We don't use the double underscore
- // macro for the add instruction because we don't want the coverage tool
- // inserting instructions here after we read the pc. We block literal pool
- // emission for the same reason.
- {
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ mov(lr, Operand(pc));
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
-
- // Unlink this frame from the handler chain.
- __ PopTryHandler();
-
- __ bind(&exit); // r0 holds result
- // Check if the current stack frame is marked as the outermost JS frame.
- Label non_outermost_js_2;
- __ pop(r5);
- __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ b(ne, &non_outermost_js_2);
- __ mov(r6, Operand::Zero());
- __ mov(r5, Operand(ExternalReference(js_entry_sp)));
- __ str(r6, MemOperand(r5));
- __ bind(&non_outermost_js_2);
-
- // Restore the top frame descriptors from the stack.
- __ pop(r3);
- __ mov(ip,
- Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
- __ str(r3, MemOperand(ip));
-
- // Reset the stack to the callee saved registers.
- __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // Restore callee-saved registers and return.
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Restore callee-saved vfp registers.
- __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
- }
-
- __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
-}
-
-
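-// Sketch of the offset_to_argv computation above: the stm pushed the
-// core callee-saved registers plus lr, and (with VFP2) vstm pushed the
-// callee-saved double registers below them. Hypothetical helper,
-// assuming the usual kNumCalleeSaved/kNumDoubleCalleeSaved constants:
-static int OffsetToArgvSketch(bool has_vfp2) {
-  int offset = (kNumCalleeSaved + 1) * kPointerSize;  // Regs plus lr.
-  if (has_vfp2) offset += kNumDoubleCalleeSaved * kDoubleSize;
-  return offset;
-}
-
-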
-// Uses registers r0 to r4.
-// Expected input (depending on whether args are in registers or on the stack):
-// * object: r0 or at sp + 1 * kPointerSize.
-// * function: r1 or at sp.
-//
-// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register r4.
-// (See LCodeGen::DoInstanceOfKnownGlobal)
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
- // ReturnTrueFalse is only implemented for inlined call sites.
- ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub:
- const Register object = r0; // Object (lhs).
- Register map = r3; // Map of the object.
- const Register function = r1; // Function (rhs).
- const Register prototype = r4; // Prototype of the function.
- const Register inline_site = r9;
- const Register scratch = r2;
-
- const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
-
- Label slow, loop, is_instance, is_not_instance, not_js_object;
-
- if (!HasArgsInRegisters()) {
- __ ldr(object, MemOperand(sp, 1 * kPointerSize));
- __ ldr(function, MemOperand(sp, 0));
- }
-
- // Check that the left hand is a JS object and load map.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
- Label miss;
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ b(ne, &miss);
- __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
- __ b(ne, &miss);
- __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
-
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- } else {
- ASSERT(HasArgsInRegisters());
- // Patch the (relocated) inlined map check.
-
- // The offset was stored in r4 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
- __ LoadFromSafepointRegisterSlot(scratch, r4);
- __ sub(inline_site, lr, scratch);
- // Get the map location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ ldr(scratch, MemOperand(scratch));
- __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- }
-
- // Register mapping: r3 is object map and r4 is function prototype.
- // Get prototype of object into r2.
- __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
-
- // We don't need map any more. Use it as a scratch register.
- Register scratch2 = map;
- map = no_reg;
-
- // Loop through the prototype chain looking for the function prototype.
- __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
- __ bind(&loop);
- __ cmp(scratch, Operand(prototype));
- __ b(eq, &is_instance);
- __ cmp(scratch, scratch2);
- __ b(eq, &is_not_instance);
- __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Patch the call site to return true.
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ str(r0, MemOperand(scratch));
-
- if (!ReturnTrueFalseObject()) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- }
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Patch the call site to return false.
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ str(r0, MemOperand(scratch));
-
- if (!ReturnTrueFalseObject()) {
- __ mov(r0, Operand(Smi::FromInt(1)));
- }
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before the null, smi and string value checks, check that the rhs is a
- // function, since a non-function rhs requires an exception to be thrown.
- __ JumpIfSmi(function, &slow);
- __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // Null is not instance of anything.
- __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
- __ b(ne, &object_not_null);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch, &slow);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- // Slow-case. Tail call builtin.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- if (HasArgsInRegisters()) {
- __ Push(r0, r1);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(r0, r1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- __ cmp(r0, Operand::Zero());
- __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
- __ Ret(HasArgsInRegisters() ? 0 : 2);
- }
-}
-
-
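-// A minimal host-side sketch of the prototype-chain walk emitted above;
-// ProtoSketch is a hypothetical stand-in for a heap object (the real
-// loop reads Map::kPrototypeOffset through each object's map):
-struct ProtoSketch { ProtoSketch* prototype; };
-static bool IsInstanceSketch(ProtoSketch* object, ProtoSketch* target,
-                             ProtoSketch* null_sentinel) {
-  for (ProtoSketch* p = object->prototype; ; p = p->prototype) {
-    if (p == target) return true;          // b(eq, &is_instance).
-    if (p == null_sentinel) return false;  // b(eq, &is_not_instance).
-  }
-}
-
-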
-void ArrayLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = r0;
- }
-
- StubCompiler::GenerateLoadArrayLength(masm, receiver, r3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string()));
- __ b(ne, &miss);
- receiver = r1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = r0;
- }
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = r0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss,
- support_wrapper_);
-
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except external arrays, which means anything with
- // elements of FixedArray type). The value must be a number, but only smis
- // are accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -----------------------------------
- __ cmp(r1, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r2;
- value = r0;
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : key
- // -----------------------------------
- receiver = r1;
- value = r0;
- }
- Register scratch = r3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
- __ b(eq, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::GenerateStoreMiss(masm, kind());
-}
-
-
-Register InstanceofStub::left() { return r0; }
-
-
-Register InstanceofStub::right() { return r1; }
-
-
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, r0, reg_, inobject_, index_);
- __ Ret();
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(r1, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register r0. Use unsigned comparison to get negative
- // check for free.
- __ cmp(r1, r0);
- __ b(hs, &slow);
-
- // Read the argument from the stack and return it.
- __ sub(r3, r0, r1);
- __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(r3, kDisplacement));
- __ Jump(lr);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(r1, r0);
- __ b(cs, &slow);
-
- // Read the argument from the adaptor frame and return it.
- __ sub(r3, r0, r1);
- __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(r3, kDisplacement));
- __ Jump(lr);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(r1);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
-
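-// Host-side sketch of the address arithmetic in GenerateReadElement.
-// Key and length are both smi-tagged (value << 1), so one extra left
-// shift scales their difference to bytes. Hypothetical helper:
-static inline int32_t* ArgumentSlotSketch(char* fp,
-                                          int32_t smi_length,
-                                          int32_t smi_index) {
-  // (length - index) in smis, shifted by kPointerSizeLog2 - kSmiTagSize
-  // (= 1), is the byte offset (length - index) * kPointerSize.
-  int32_t byte_offset = (smi_length - smi_index) << 1;
-  const int kDisplacement =
-      StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  return reinterpret_cast<int32_t*>(fp + byte_offset + kDisplacement);
-}
-
-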
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
- __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &runtime);
-
- // Patch the arguments.length and the parameters pointer in the current frame.
- __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ str(r2, MemOperand(sp, 0 * kPointerSize));
- __ add(r3, r3, Operand(r2, LSL, 1));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
- // Stack layout:
- // sp[0] : number of parameters (tagged)
- // sp[4] : address of receiver argument
- // sp[8] : function
- // Registers used over whole function:
- // r6 : allocated object (tagged)
- // r9 : mapped parameter count (tagged)
-
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
- // r1 = parameter count (tagged)
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
- __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
-
- // No adaptor, parameter count = argument count.
- __ mov(r2, r1);
- __ b(&try_allocate);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ add(r3, r3, Operand(r2, LSL, 1));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
-
- // r1 = parameter count (tagged)
- // r2 = argument count (tagged)
- // Compute the mapped parameter count = min(r1, r2) in r1.
- __ cmp(r1, Operand(r2));
- __ mov(r1, Operand(r2), LeaveCC, gt);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- __ cmp(r1, Operand(Smi::FromInt(0)));
- __ mov(r9, Operand::Zero(), LeaveCC, eq);
- __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
- __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
-
- // 2. Backing store.
- __ add(r9, r9, Operand(r2, LSL, 1));
- __ add(r9, r9, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
-
- // r0 = address of new object(s) (tagged)
- // r2 = argument count (tagged)
- // Get the arguments boilerplate from the current native context into r4.
- const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
-
- __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
- __ cmp(r1, Operand::Zero());
- __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
- __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
-
- // r0 = address of new object (tagged)
- // r1 = mapped parameter count (tagged)
- // r2 = argument count (tagged)
- // r4 = address of boilerplate object (tagged)
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ ldr(r3, FieldMemOperand(r4, i));
- __ str(r3, FieldMemOperand(r0, i));
- }
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ str(r3, FieldMemOperand(r0, kCalleeOffset));
-
- // Use the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset = JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize;
- __ str(r2, FieldMemOperand(r0, kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, r4 will point there, otherwise
- // it will point to the backing store.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
-
- // r0 = address of new object (tagged)
- // r1 = mapped parameter count (tagged)
- // r2 = argument count (tagged)
- // r4 = address of parameter map or backing store (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ cmp(r1, Operand(Smi::FromInt(0)));
- // Move backing store address to r3, because it is
- // expected there when filling in the unmapped arguments.
- __ mov(r3, r4, LeaveCC, eq);
- __ b(eq, &skip_parameter_map);
-
- __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
- __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
- __ add(r6, r1, Operand(Smi::FromInt(2)));
- __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ add(r6, r4, Operand(r1, LSL, 1));
- __ add(r6, r6, Operand(kParameterMapHeaderSize));
- __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameters thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mov(r6, r1);
- __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
- __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ sub(r9, r9, Operand(r1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
- __ add(r3, r4, Operand(r6, LSL, 1));
- __ add(r3, r3, Operand(kParameterMapHeaderSize));
-
- // r6 = loop variable (tagged)
- // r1 = mapping index (tagged)
- // r3 = address of backing store (tagged)
- // r4 = address of parameter map (tagged)
- // r5 = temporary scratch (a.o., for address calculation)
- // r7 = the hole value
- __ jmp(&parameters_test);
-
- __ bind(&parameters_loop);
- __ sub(r6, r6, Operand(Smi::FromInt(1)));
- __ mov(r5, Operand(r6, LSL, 1));
- __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ str(r9, MemOperand(r4, r5));
- __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ str(r7, MemOperand(r3, r5));
- __ add(r9, r9, Operand(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ cmp(r6, Operand(Smi::FromInt(0)));
- __ b(ne, &parameters_loop);
-
- __ bind(&skip_parameter_map);
- // r2 = argument count (tagged)
- // r3 = address of backing store (tagged)
- // r5 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
- __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
- __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
-
- Label arguments_loop, arguments_test;
- __ mov(r9, r1);
- __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
- __ sub(r4, r4, Operand(r9, LSL, 1));
- __ jmp(&arguments_test);
-
- __ bind(&arguments_loop);
- __ sub(r4, r4, Operand(kPointerSize));
- __ ldr(r6, MemOperand(r4, 0));
- __ add(r5, r3, Operand(r9, LSL, 1));
- __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
- __ add(r9, r9, Operand(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ cmp(r9, Operand(r2));
- __ b(lt, &arguments_loop);
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // r2 = argument count (tagged)
- __ bind(&runtime);
- __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
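-// Sketch of the allocation size computed at &try_allocate above, in
-// bytes, with mapped and argc the untagged counts (hypothetical
-// helper, not V8 API):
-static int NonStrictArgumentsSizeSketch(int mapped, int argc) {
-  int bytes = 0;
-  if (mapped > 0) {  // Parameter map: header, context, store, slots.
-    bytes += FixedArray::kHeaderSize + 2 * kPointerSize +
-             mapped * kPointerSize;
-  }
-  bytes += FixedArray::kHeaderSize + argc * kPointerSize;  // Backing.
-  bytes += Heap::kArgumentsObjectSize;                     // JS object.
-  return bytes;
-}
-
-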
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
-
- // Get the length from the frame.
- __ ldr(r1, MemOperand(sp, 0));
- __ b(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ str(r1, MemOperand(sp, 0));
- __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ cmp(r1, Operand::Zero());
- __ b(eq, &add_arguments_object);
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
- __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(r1,
- r0,
- r2,
- r3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT |
- SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current native context.
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
- __ ldr(r4, MemOperand(r4, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
-
- // Copy the JS object part.
- __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
- __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
-
- // If there are no actual arguments, we're done.
- Label done;
- __ cmp(r1, Operand::Zero());
- __ b(eq, &done);
-
- // Get the parameters pointer from the stack.
- __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
- __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
- __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // Untag the length for the loop.
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
-
- // Copy the fixed array slots.
- Label loop;
- // Set up r4 to point to the first array slot.
- __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- // Pre-decrement r2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
- // Post-increment r4 with kPointerSize on each iteration.
- __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
- __ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand::Zero());
- __ b(ne, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
-}
-
-
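-// Sketch of the word count computed at &try_allocate above: zero
-// arguments need only the arguments object itself; otherwise add a
-// FixedArray header plus one word per argument. Hypothetical helper:
-static int StrictArgumentsSizeInWordsSketch(int argc) {
-  int words = (argc == 0)
-      ? 0
-      : argc + FixedArray::kHeaderSize / kPointerSize;
-  return words + Heap::kArgumentsObjectSizeStrict / kPointerSize;
-}
-
-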
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to the runtime if native RegExp is not selected at
- // compile time, or if the regexp entry in generated code has been turned
- // off, either at runtime or at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
-
- // Stack frame on entry.
- // sp[0]: last_match_info (expected JSArray)
- // sp[4]: previous index
- // sp[8]: subject string
- // sp[12]: JSRegExp object
-
- const int kLastMatchInfoOffset = 0 * kPointerSize;
- const int kPreviousIndexOffset = 1 * kPointerSize;
- const int kSubjectOffset = 2 * kPointerSize;
- const int kJSRegExpOffset = 3 * kPointerSize;
-
- Label runtime;
- // Allocation of registers for this function. These are in callee save
- // registers and will be preserved by the call to the native RegExp code, as
- // this code is called using the normal C calling convention. When calling
- // directly from generated code the native RegExp code will not do a GC and
- // therefore the content of these registers are safe to use after the call.
- Register subject = r4;
- Register regexp_data = r5;
- Register last_match_info_elements = r6;
-
- // Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
- __ mov(r0, Operand(address_of_regexp_stack_memory_size));
- __ ldr(r0, MemOperand(r0, 0));
- __ cmp(r0, Operand::Zero());
- __ b(eq, &runtime);
-
- // Check that the first argument is a JSRegExp object.
- __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- __ b(ne, &runtime);
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ tst(regexp_data, Operand(kSmiTagMask));
- __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
- __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
- __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // regexp_data: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
- __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ b(ne, &runtime);
-
- // regexp_data: RegExp data (FixedArray)
- // Check that the number of captures fit in the static offsets vector buffer.
- __ ldr(r2,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Check (number_of_captures + 1) * 2 <= offsets vector size
- // Or number_of_captures * 2 <= offsets vector size - 2
- // Multiplying by 2 comes for free since r2 is smi-tagged.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
- __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
- __ b(hi, &runtime);
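- // Host-side restatement of the bound just checked (illustration
- // only): with the capture count N still smi-tagged, smi(N) == 2 * N,
- // so "2 * N <= kJSRegexpStaticOffsetsVectorSize - 2" is exactly
- // "(N + 1) * 2 <= kJSRegexpStaticOffsetsVectorSize".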
-
- // Reset offset for possibly sliced string.
- __ mov(r9, Operand::Zero());
- __ ldr(subject, MemOperand(sp, kSubjectOffset));
- __ JumpIfSmi(subject, &runtime);
- __ mov(r3, subject); // Make a copy of the original subject string.
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // subject: subject string
- // r3: subject string
- // r0: subject string instance type
- // regexp_data: RegExp data (FixedArray)
- // Handle subject string according to its encoding and representation:
- // (1) Sequential string? If yes, go to (5).
- // (2) Anything but sequential or cons? If yes, go to (6).
- // (3) Cons string. If the string is flat, replace subject with first string.
- // Otherwise bailout.
- // (4) Is subject external? If yes, go to (7).
- // (5) Sequential string. Load regexp code according to encoding.
- // (E) Carry on.
- /// [...]
-
- // Deferred code at the end of the stub:
- // (6) Not a long external string? If yes, go to (8).
- // (7) External string. Make it, offset-wise, look like a sequential string.
- // Go to (5).
- // (8) Short external string or not a string? If yes, bail out to runtime.
- // (9) Sliced string. Replace subject with parent. Go to (4).
-
- Label seq_string /* 5 */, external_string /* 7 */,
- check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
- not_long_external /* 8 */;
-
- // (1) Sequential string? If yes, go to (5).
- __ and_(r1,
- r0,
- Operand(kIsNotStringMask |
- kStringRepresentationMask |
- kShortExternalStringMask),
- SetCC);
- STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ b(eq, &seq_string); // Go to (5).
-
- // (2) Anything but sequential or cons? If yes, go to (6).
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
- STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ cmp(r1, Operand(kExternalStringTag));
- __ b(ge, &not_seq_nor_cons); // Go to (6).
-
- // (3) Cons string. Check that it's flat.
- // Replace subject with first string and reload instance type.
- __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ CompareRoot(r0, Heap::kempty_stringRootIndex);
- __ b(ne, &runtime);
- __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
-
- // (4) Is subject external? If yes, go to (7).
- __ bind(&check_underlying);
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r0, Operand(kStringRepresentationMask));
- // The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
- __ b(ne, &external_string); // Go to (7).
-
- // (5) Sequential string. Load regexp code according to encoding.
- __ bind(&seq_string);
- // subject: sequential subject string (or look-alike, external string)
- // r3: original subject string
- // Load previous index and check range before r3 is overwritten. We have to
- // use r3 instead of subject here because subject might only have been made
- // to look like a sequential string when it is actually an external string.
- __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
- __ JumpIfNotSmi(r1, &runtime);
- __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
- __ cmp(r3, Operand(r1));
- __ b(ls, &runtime);
- __ mov(r1, Operand(r1, ASR, kSmiTagSize));
-
- STATIC_ASSERT(4 == kOneByteStringTag);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ and_(r0, r0, Operand(kStringEncodingMask));
- __ mov(r3, Operand(r0, ASR, 2), SetCC);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
-
- // (E) Carry on. String handling is done.
- // r7: irregexp code
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object; otherwise it
- // contains a smi (code flushing support).
- __ JumpIfSmi(r7, &runtime);
-
- // r1: previous index
- // r3: encoding of subject string (1 if ASCII, 0 if two-byte)
- // r7: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // All checks done. Now push arguments for native regexp code.
- __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
-
- // Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 9;
- const int kParameterRegisters = 4;
- __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
-
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are before that on the stack or in registers.
-
- // Argument 9 (sp[20]): Pass current isolate address.
- __ mov(r0, Operand(ExternalReference::isolate_address()));
- __ str(r0, MemOperand(sp, 5 * kPointerSize));
-
- // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
- __ mov(r0, Operand(1));
- __ str(r0, MemOperand(sp, 4 * kPointerSize));
-
- // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
- __ mov(r0, Operand(address_of_regexp_stack_memory_address));
- __ ldr(r0, MemOperand(r0, 0));
- __ mov(r2, Operand(address_of_regexp_stack_memory_size));
- __ ldr(r2, MemOperand(r2, 0));
- __ add(r0, r0, Operand(r2));
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(r0, Operand::Zero());
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
-
- // Argument 5 (sp[4]): static offsets vector buffer.
- __ mov(r0,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
- __ str(r0, MemOperand(sp, 1 * kPointerSize));
-
- // For arguments 4 and 3, get the string length, calculate the start of the
- // string data and calculate the shift of the index (0 for ASCII and 1 for
- // two-byte).
- __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ eor(r3, r3, Operand(1));
- // Load the length of the original subject string from the previous stack
- // frame. Therefore we have to use fp, which points exactly two pointer
- // sizes below the previous sp. (Because creating a new stack frame pushes
- // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
- __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
- // If slice offset is not 0, load the length from the original sliced string.
- // Argument 4, r3: End of string data
- // Argument 3, r2: Start of string data
- // Prepare start and end index of the input.
- __ add(r9, r8, Operand(r9, LSL, r3));
- __ add(r2, r9, Operand(r1, LSL, r3));
-
- __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
- __ mov(r8, Operand(r8, ASR, kSmiTagSize));
- __ add(r3, r9, Operand(r8, LSL, r3));
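- // Illustrative pointer arithmetic for the adds above (not in the original
- // comments): with shift r3 = 0 (ASCII) or 1 (two-byte),
- //   r9 = string data base + (slice offset << shift)
- //   r2 = r9 + (previous index << shift)   // argument 3: start of data
- //   r3 = r9 + (subject length << shift)   // argument 4: end of data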
-
- // Argument 2 (r1): Previous index.
- // Already there
-
- // Argument 1 (r0): Subject string.
- __ mov(r0, subject);
-
- // Locate the code entry and call it.
- __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub;
- stub.GenerateCall(masm, r7);
-
- __ LeaveExitFrame(false, no_reg);
-
- // r0: result
- // subject: subject string (callee saved)
- // regexp_data: RegExp data (callee saved)
- // last_match_info_elements: Last match info elements (callee saved)
- // Check the result.
- Label success;
- __ cmp(r0, Operand(1));
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
- __ b(eq, &success);
- Label failure;
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
- __ b(eq, &failure);
- __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
- // If not exception it can only be retry. Handle that in the runtime system.
- __ b(ne, &runtime);
- // The result must now be exception. If there is no pending exception
- // already, a stack overflow (on the backtrack stack) was detected in RegExp
- // code, but the exception has not been created yet. Handle that in the
- // runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r1, Operand(isolate->factory()->the_hole_value()));
- __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ ldr(r0, MemOperand(r2, 0));
- __ cmp(r0, r1);
- __ b(eq, &runtime);
-
- __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
-
- // Check if the exception is a termination. If so, throw as uncatchable.
- __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
-
- Label termination_exception;
- __ b(eq, &termination_exception);
-
- __ Throw(r0);
-
- __ bind(&termination_exception);
- __ ThrowUncatchable(r0);
-
- __ bind(&failure);
- // For failure and exception return null.
- __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- // Process the result from the native regexp code.
- __ bind(&success);
- __ ldr(r1,
- FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- // Multiplying by 2 comes for free since r1 is smi-tagged.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(r1, r1, Operand(2)); // r1 was a smi.
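- // For example (illustrative): 3 captures arrive as smi 6 in r1, and adding
- // 2 yields the 8 = (3 + 1) * 2 untagged capture registers.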
-
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
- __ b(ne, &runtime);
- // Check that the JSArray is in fast case.
- __ ldr(last_match_info_elements,
- FieldMemOperand(r0, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ ldr(r0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
- __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
- __ b(gt, &runtime);
-
- // r1: number of capture registers
- // r4: subject string
- // Store the capture count.
- __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi.
- __ str(r2, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset));
- // Store last subject and last input.
- __ str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset));
- __ mov(r2, subject);
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset,
- subject,
- r7,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- __ mov(subject, r2);
- __ str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset));
- __ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastInputOffset,
- subject,
- r7,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(isolate);
- __ mov(r2, Operand(address_of_static_offsets_vector));
-
- // r1: number of capture registers
- // r2: offsets vector
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wrapping after zero.
- __ add(r0,
- last_match_info_elements,
- Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
- __ bind(&next_capture);
- __ sub(r1, r1, Operand(1), SetCC);
- __ b(mi, &done);
- // Read the value from the static offsets vector buffer.
- __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
- // Store the smi value in the last match info.
- __ mov(r3, Operand(r3, LSL, kSmiTagSize));
- __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-
- // Deferred code for string handling.
- // (6) Not a long external string? If yes, go to (8).
- __ bind(&not_seq_nor_cons);
- // Compare flags are still set.
- __ b(gt, &not_long_external); // Go to (8).
-
- // (7) External string. Make it, offset-wise, look like a sequential string.
- __ bind(&external_string);
- __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ tst(r0, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found");
- }
- __ ldr(subject,
- FieldMemOperand(subject, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ sub(subject,
- subject,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ jmp(&seq_string); // Go to (5).
-
- // (8) Short external string or not a string? If yes, bail out to runtime.
- __ bind(&not_long_external);
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
- __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
- __ b(ne, &runtime);
-
- // (9) Sliced string. Replace subject with parent. Go to (4).
- // Load offset into r9 and replace subject string with parent.
- __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ mov(r9, Operand(r9, ASR, kSmiTagSize));
- __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- __ jmp(&check_underlying); // Go to (4).
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- Factory* factory = masm->isolate()->factory();
-
- __ ldr(r1, MemOperand(sp, kPointerSize * 2));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ JumpIfNotSmi(r1, &slowcase);
- __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
- __ b(hi, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- // Allocate RegExpResult followed by FixedArray with size in r2.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- // Size of JSArray with two in-object properties and the header of a
- // FixedArray.
- int objects_size =
- (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
- __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
- __ add(r2, r5, Operand(objects_size));
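- // Illustrative sizing (a sketch, not from the original comments): for 5
- // elements, r1 holds smi 10, r5 becomes 5, and r2 becomes 5 + objects_size
- // words: the JSArray, the FixedArray header, and five element slots.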
- __ AllocateInNewSpace(
- r2, // In: Size, in words.
- r0, // Out: Start of allocation (tagged).
- r3, // Scratch register.
- r4, // Scratch register.
- &slowcase,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
- // r0: Start of allocated area, object-tagged.
- // r1: Number of elements in array, as smi.
- // r5: Number of elements, untagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ add(r3, r0, Operand(JSRegExpResult::kSize));
- __ mov(r4, Operand(factory->empty_fixed_array()));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
- __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
- // Set input, index and length fields from arguments.
- __ ldr(r1, MemOperand(sp, kPointerSize * 0));
- __ ldr(r2, MemOperand(sp, kPointerSize * 1));
- __ ldr(r6, MemOperand(sp, kPointerSize * 2));
- __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
- __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
- __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
-
- // Fill out the elements FixedArray.
- // r0: JSArray, tagged.
- // r3: FixedArray, tagged.
- // r5: Number of elements in array, untagged.
-
- // Set map.
- __ mov(r2, Operand(factory->fixed_array_map()));
- __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- // Set FixedArray length.
- __ mov(r6, Operand(r5, LSL, kSmiTagSize));
- __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // r0: JSArray, tagged.
- // r2: undefined.
- // r3: Start of elements in FixedArray.
- // r5: Number of elements to fill.
- Label loop;
- __ cmp(r5, Operand::Zero());
- __ bind(&loop);
- __ b(le, &done); // Jump if r5 is negative or zero.
- __ sub(r5, r5, Operand(1), SetCC);
- __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
- __ jmp(&loop);
-
- __ bind(&done);
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // r1 : the function to call
- // r2 : cache cell for call target
- ASSERT(!FLAG_optimize_constructed_arrays);
- Label done;
-
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
-
- // Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(r3, r1);
- __ b(eq, &done);
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &done);
-
- // A monomorphic miss (i.e., the cache is not uninitialized here) goes
- // megamorphic.
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
-
- // An uninitialized cache is patched with the function.
- __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
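-
-// The cache update above, as a rough C++-style sketch (illustrative only;
-// "cell" and "function" stand for the values in r2 and r1):
-//   Object* state = cell->value();
-//   if (state == function || state == undefined) return;  // hit/megamorphic
-//   // the_hole means uninitialized: install the function; any other value
-//   // is a monomorphic miss: go megamorphic.
-//   cell->set_value(state == the_hole ? function : undefined);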
-
-
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // r1 : the function to call
- // r2 : cache cell for call target
- ASSERT(FLAG_optimize_constructed_arrays);
- Label initialize, done, miss, megamorphic, not_array_function;
-
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
-
- // Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(r3, r1);
- __ b(eq, &done);
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &done);
-
- // Special handling of the Array() function, which caches not only the
- // monomorphic Array function but also the initial ElementsKind with special
- // sentinels.
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- LAST_FAST_ELEMENTS_KIND);
- __ cmp(r3, Operand(terminal_kind_sentinel));
- __ b(ne, &miss);
- // Make sure the function is the Array() function.
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &megamorphic);
- __ jmp(&done);
-
- __ bind(&miss);
-
- // A monomorphic miss (i.e., the cache is not uninitialized here) goes
- // megamorphic.
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ b(eq, &initialize);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ bind(&megamorphic);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
- __ bind(&initialize);
- // Make sure the function is the Array() function.
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &not_array_function);
-
- // The target function is the Array constructor. Install a sentinel value in
- // the constructor's type info cell that will track the initial ElementsKind
- // that should be used for the array when it is constructed.
- Handle<Object> initial_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
- GetInitialFastElementsKind());
- __ mov(r3, Operand(initial_kind_sentinel));
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- __ b(&done);
-
- __ bind(&not_array_function);
- __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // r1 : the function to call
- // r2 : cache cell for call target
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
- // Call as function is indicated with the hole.
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ b(ne, &call);
- // Patch the receiver on the stack with the global receiver object.
- __ ldr(r3,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc_ * kPointerSize));
- __ bind(&call);
- }
-
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(r1, &non_function);
- // Get the map of the function object.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Fast-case: Invoke the function now.
- // r1: pushed function
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ b(eq, &call_as_function);
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- }
- // Check for function proxy.
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, &non_function);
- __ push(r1); // Put proxy as an additional argument.
- __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
- __ mov(r2, Operand::Zero());
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(r5, CALL_AS_METHOD);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
- __ mov(r0, Operand(argc_)); // Set up the number of arguments.
- __ mov(r2, Operand::Zero());
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-void CallConstructStub::Generate(MacroAssembler* masm) {
- // r0 : number of arguments
- // r1 : the function to call
- // r2 : cache cell for call target
- Label slow, non_function_call;
-
- // Check that the function is not a smi.
- __ JumpIfSmi(r1, &non_function_call);
- // Check that the function is a JSFunction.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Jump to the function-specific construct stub.
- Register jmp_reg = FLAG_optimize_constructed_arrays ? r3 : r2;
- __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // r0: number of arguments
- // r1: called object
- // r3: object type
- Label do_call;
- __ bind(&slow);
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, &non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand::Zero());
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-// StringCharCodeAtGenerator
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
- Label sliced_string;
-
- // If the receiver is a smi trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ tst(result_, Operand(kIsNotStringMask));
- __ b(ne, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- __ JumpIfNotSmi(index_, &index_not_smi_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
- __ cmp(ip, Operand(index_));
- __ b(ls, index_out_of_range_);
-
- __ mov(index_, Operand(index_, ASR, kSmiTagSize));
-
- StringCharLoadGenerator::Generate(masm,
- object_,
- index_,
- result_,
- &call_runtime_);
-
- __ mov(result_, Operand(result_, LSL, kSmiTagSize));
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- result_,
- Heap::kHeapNumberMapRootIndex,
- index_not_number_,
- DONT_DO_SMI_CHECK);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ Move(index_, r0);
- __ pop(object_);
- // Reload the instance type.
- __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(index_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but getting the actual character is too complex
- // (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ mov(index_, Operand(index_, LSL, kSmiTagSize));
- __ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- __ Move(result_, r0);
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
- __ tst(code_,
- Operand(kSmiTagMask |
- ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
- __ b(ne, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- // At this point the code register contains a smi-tagged ASCII char code.
- STATIC_ASSERT(kSmiTag == 0);
- __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case_);
- __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- __ Move(result_, r0);
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- Label done;
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand::Zero());
- }
- __ b(eq, &done);
-
- __ bind(&loop);
- __ ldrb(scratch, MemOperand(src, 1, PostIndex));
- // Perform the sub between the load and the dependent store to give the load
- // time to complete.
- __ sub(count, count, Operand(1), SetCC);
- __ strb(scratch, MemOperand(dest, 1, PostIndex));
- // Branch back unless that was the last iteration.
- __ b(gt, &loop);
-
- __ bind(&done);
-}
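-
-// Roughly equivalent C for the copy loop above (a sketch; "count" is the
-// character count, pre-doubled to a byte count for two-byte strings):
-//   while (count-- > 0) *dest++ = *src++;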
-
-
-enum CopyCharactersFlags {
- COPY_ASCII = 1,
- DEST_ALWAYS_ALIGNED = 2
-};
-
-
-void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags) {
- bool ascii = (flags & COPY_ASCII) != 0;
- bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
-
- if (dest_always_aligned && FLAG_debug_code) {
- // Check that destination is actually word aligned if the flag says
- // that it is.
- __ tst(dest, Operand(kPointerAlignmentMask));
- __ Check(eq, "Destination of copy not aligned.");
- }
-
- const int kReadAlignment = 4;
- const int kReadAlignmentMask = kReadAlignment - 1;
- // Ensure that reading an entire aligned word containing the last character
- // of a string will not read outside the allocated area (because we pad up
- // to kObjectAlignment).
- STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
- // Assumes word reads and writes are little endian.
- // Nothing to do for zero characters.
- Label done;
- if (!ascii) {
- __ add(count, count, Operand(count), SetCC);
- } else {
- __ cmp(count, Operand::Zero());
- }
- __ b(eq, &done);
-
- // Assume that you cannot read (or write) unaligned.
- Label byte_loop;
- // Must copy at least eight bytes, otherwise just do it one byte at a time.
- __ cmp(count, Operand(8));
- __ add(count, dest, Operand(count));
- Register limit = count; // Read until src equals this.
- __ b(lt, &byte_loop);
-
- if (!dest_always_aligned) {
- // Align dest by byte copying. Copies between zero and three bytes.
- __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
- Label dest_aligned;
- __ b(eq, &dest_aligned);
- __ cmp(scratch4, Operand(2));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
- __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
- __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
- __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
- __ bind(&dest_aligned);
- }
-
- Label simple_loop;
-
- __ sub(scratch4, dest, Operand(src));
- __ and_(scratch4, scratch4, Operand(0x03), SetCC);
- __ b(eq, &simple_loop);
- // The shift register holds the number of bits in a source word that
- // must be combined with bits in the next source word in order
- // to create a destination word.
-
- // Complex loop for src/dst that are not aligned the same way.
- {
- Label loop;
- __ mov(scratch4, Operand(scratch4, LSL, 3));
- Register left_shift = scratch4;
- __ and_(src, src, Operand(~3)); // Round down to load previous word.
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- // Store the "shift" most significant bits of scratch in the least
- // signficant bits (i.e., shift down by (32-shift)).
- __ rsb(scratch2, left_shift, Operand(32));
- Register right_shift = scratch2;
- __ mov(scratch1, Operand(scratch1, LSR, right_shift));
-
- __ bind(&loop);
- __ ldr(scratch3, MemOperand(src, 4, PostIndex));
- __ sub(scratch5, limit, Operand(dest));
- __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- __ mov(scratch1, Operand(scratch3, LSR, right_shift));
- // Loop if four or more bytes left to copy.
- // Compare to eight, because we did the subtract before increasing dest.
- __ sub(scratch5, scratch5, Operand(8), SetCC);
- __ b(ge, &loop);
- }
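- // Illustratively (not in the original comments), each word stored above is
- //   out = (w_i >> right_shift) | (w_i+1 << left_shift)
- // where left_shift = 8 * ((dest - src) & 3) and right_shift = 32 - left_shift.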
- // There is now between zero and three bytes left to copy (the negative of
- // that number is in scratch5), and between one and three bytes already read
- // into scratch1 (eight times that number in scratch4). We may have read past
- // the end of the string, but because objects are aligned, we have not read
- // past the end of the object.
- // Find the minimum of remaining characters to move and preloaded characters
- // and write those as bytes.
- __ add(scratch5, scratch5, Operand(4), SetCC);
- __ b(eq, &done);
- __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
- // Move the minimum of bytes read and bytes left to copy to scratch5.
- __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
- // Between one and three (value in scratch5) characters already read into
- // scratch1, ready to write.
- __ cmp(scratch5, Operand(2));
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
- __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
- // Copy any remaining bytes.
- __ b(&byte_loop);
-
- // Simple loop.
- // Copy words from src to dest, until less than four bytes left.
- // Both src and dest are word aligned.
- __ bind(&simple_loop);
- {
- Label loop;
- __ bind(&loop);
- __ ldr(scratch1, MemOperand(src, 4, PostIndex));
- __ sub(scratch3, limit, Operand(dest));
- __ str(scratch1, MemOperand(dest, 4, PostIndex));
- // Compare to 8, not 4, because we do the subtraction before increasing
- // dest.
- __ cmp(scratch3, Operand(8));
- __ b(ge, &loop);
- }
-
- // Copy bytes from src to dst until dst hits limit.
- __ bind(&byte_loop);
- __ cmp(dest, Operand(limit));
- __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
- __ b(ge, &done);
- __ strb(scratch1, MemOperand(dest, 1, PostIndex));
- __ b(&byte_loop);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ sub(scratch, c1, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
- __ b(hi, &not_array_index);
- __ sub(scratch, c2, Operand(static_cast<int>('0')));
- __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
-
- // If the check failed, combine both characters into a single halfword.
- // This is required by the contract of the method: code at the
- // not_found branch expects this combination in the c1 register.
- __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
- __ b(ls, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- StringHelper::GenerateHashInit(masm, hash, c1);
- StringHelper::GenerateHashAddCharacter(masm, hash, c2);
- StringHelper::GenerateHashGetHash(masm, hash);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- Register undefined = scratch4;
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ ldr(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, 1));
- __ sub(mask, mask, Operand(1));
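- // A sketch of the probe-index computation used below (assuming the table
- // capacity is a power of two, so "capacity - 1" works as a mask):
- //   index_i = (hash + StringTable::GetProbeOffset(i)) & (capacity - 1)
- // The capacity is a smi, hence the ASR by 1 to untag it first.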
-
- // Calculate untagged address of the first element of the string table.
- Register first_string_table_element = string_table;
- __ add(first_string_table_element, string_table,
- Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // mask: capacity mask
- // first_string_table_element: address of the first element of
- // the string table
- // undefined: the undefined object
- // scratch: -
-
- // Perform a number of probes in the string table.
- const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch5; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- if (i > 0) {
- __ add(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
- } else {
- __ mov(candidate, hash);
- }
-
- __ and_(candidate, candidate, Operand(mask));
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ ldr(candidate,
- MemOperand(first_string_table_element,
- candidate,
- LSL,
- kPointerSizeLog2));
-
- // If the entry is undefined, no string with this hash can be found.
- Label is_string;
- __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
- __ b(ne, &is_string);
-
- __ cmp(undefined, candidate);
- __ b(eq, not_found);
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(ip, candidate);
- __ Assert(eq, "oddball in string table is not undefined or the hole");
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // Check that the candidate is a non-external ASCII string. The instance
- // type is still in the scratch register from the CompareObjectType
- // operation.
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
-
- // If the length is not 2, the string is not a candidate.
- __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
- __ cmp(scratch, Operand(Smi::FromInt(2)));
- __ b(ne, &next_probe[i]);
-
- // Check if the two characters match.
- // Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
- __ cmp(chars, scratch);
- __ b(eq, &found_in_string_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching two-character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ Move(r0, result);
-}
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash = seed + character.
- __ LoadRoot(hash, Heap::kHashSeedRootIndex);
- // Untag the smi seed and add the character.
- __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
- // hash += hash << 10;
- __ add(hash, hash, Operand(hash, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash += character;
- __ add(hash, hash, Operand(character));
- // hash += hash << 10;
- __ add(hash, hash, Operand(hash, LSL, 10));
- // hash ^= hash >> 6;
- __ eor(hash, hash, Operand(hash, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
- // hash += hash << 3;
- __ add(hash, hash, Operand(hash, LSL, 3));
- // hash ^= hash >> 11;
- __ eor(hash, hash, Operand(hash, LSR, 11));
- // hash += hash << 15;
- __ add(hash, hash, Operand(hash, LSL, 15));
-
- __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
-
- // if (hash == 0) hash = 27;
- __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
-}
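-
-// Taken together, the three helpers above compute the following hash. A
-// minimal stand-alone sketch (illustrative only: HashSequence is not a V8
-// function; "seed" is the untagged hash seed, and 27 is
-// StringHasher::kZeroHash):
-//   uint32_t HashSequence(uint32_t seed, const uint8_t* chars, int length) {
-//     uint32_t hash = seed;
-//     for (int i = 0; i < length; i++) {
-//       hash += chars[i];
-//       hash += hash << 10;
-//       hash ^= hash >> 6;
-//     }
-//     hash += hash << 3;
-//     hash ^= hash >> 11;
-//     hash += hash << 15;
-//     hash &= String::kHashBitMask;
-//     return hash == 0 ? 27 : hash;
-//   }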
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // lr: return address
- // sp[0]: to
- // sp[4]: from
- // sp[8]: string
-
- // This stub is called from the native-call %_SubString(...), so
- // nothing can be assumed about the arguments. It is tested that:
- // "string" is a sequential string,
- // both "from" and "to" are smis, and
- // 0 <= from <= to <= string.length.
- // If any of these assumptions fail, we call the runtime system.
-
- const int kToOffset = 0 * kPointerSize;
- const int kFromOffset = 1 * kPointerSize;
- const int kStringOffset = 2 * kPointerSize;
-
- __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
- STATIC_ASSERT(kFromOffset == kToOffset + 4);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-
- // Arithmetic shift right by one un-smi-tags. In this case we rotate right
- // instead because we bail out on non-smi values: ROR and ASR are equivalent
- // for smis but they set the flags in a way that's easier to optimize.
- __ mov(r2, Operand(r2, ROR, 1), SetCC);
- __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
- // If either to or from had the smi tag bit set, then C is set now, and N
- // has the same value: we rotated by 1, so the bottom bit is now the top bit.
- // We want to bail out to runtime here if "from" is negative. In that case,
- // the next instruction is not executed and we fall through to bailing out
- // to runtime.
- // Executed if both r2 and r3 are untagged integers.
- __ sub(r2, r2, Operand(r3), SetCC, cc);
- // One of the above un-smis or the above SUB could have set N == 1.
- __ b(mi, &runtime); // Either "from" or "to" is not a smi, or from > to.
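- // Worked example of the rotate trick (illustrative): to = 6 arrives as
- // smi 12 (0b1100); ROR #1 gives 6 with C = 0. A non-smi value such as 13
- // (0b1101) gives 0x80000006 with C = 1 and N = 1, so the mi branch above
- // bails out.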
-
- // Make sure first argument is a string.
- __ ldr(r0, MemOperand(sp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
- // Do a JumpIfSmi, but fold its jump into the subsequent string test.
- __ tst(r0, Operand(kSmiTagMask));
- Condition is_string = masm->IsObjectStringType(r0, r1, ne);
- ASSERT(is_string == eq);
- __ b(NegateCondition(is_string), &runtime);
-
- Label single_char;
- __ cmp(r2, Operand(1));
- __ b(eq, &single_char);
-
- // Shortcut for the case of a trivial substring.
- Label return_r0;
- // r0: original string
- // r2: result string length
- __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
- __ cmp(r2, Operand(r4, ASR, 1));
- // Return original string.
- __ b(eq, &return_r0);
- // Longer than original string's length or negative: unsafe arguments.
- __ b(hi, &runtime);
- // Shorter than original string's length: an actual substring.
-
- // Deal with different string types: update the index if necessary
- // and put the underlying string into r5.
- // r0: original string
- // r1: instance type
- // r2: length
- // r3: from index (untagged)
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ tst(r1, Operand(kIsIndirectStringMask));
- __ b(eq, &seq_or_external_string);
-
- __ tst(r1, Operand(kSlicedNotConsMask));
- __ b(ne, &sliced_string);
- // Cons string. Check whether it is flat, then fetch first part.
- __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
- __ CompareRoot(r5, Heap::kempty_stringRootIndex);
- __ b(ne, &runtime);
- __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
- // Update instance type.
- __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
- __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
- __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
- // Update instance type.
- __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ mov(r5, r0);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // r5: underlying subject string
- // r1: instance type of underlying subject string
- // r2: length
- // r3: adjusted start index (untagged)
- __ cmp(r2, Operand(SlicedString::kMinLength));
- // Short slice. Copy instead of slicing.
- __ b(lt, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck the encoding
- // of the newly created string's parent anyway due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ tst(r1, Operand(kStringEncodingMask));
- __ b(eq, &two_byte_slice);
- __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
- __ jmp(&set_slice_header);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
- __ bind(&set_slice_header);
- __ mov(r3, Operand(r3, LSL, 1));
- __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
- __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
- __ jmp(&return_r0);
-
- __ bind(&copy_routine);
- }
-
- // r5: underlying subject string
- // r1: instance type of underlying subject string
- // r2: length
- // r3: adjusted start index (untagged)
- Label two_byte_sequential, sequential_string, allocate_result;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r1, Operand(kExternalStringTag));
- __ b(eq, &sequential_string);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ tst(r1, Operand(kShortExternalStringTag));
- __ b(ne, &runtime);
- __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
- // r5 already points to the first character of underlying string.
- __ jmp(&allocate_result);
-
- __ bind(&sequential_string);
- // Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&allocate_result);
- // Allocate the result (the encoding is checked just below).
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ tst(r1, Operand(kStringEncodingMask));
- __ b(eq, &two_byte_sequential);
-
- // Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
-
- // Locate first character of substring to copy.
- __ add(r5, r5, r3);
- // Locate first character of result.
- __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // r0: result string
- // r1: first character of result string
- // r2: result string length
- // r5: first character of substring to copy
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
- COPY_ASCII | DEST_ALWAYS_ALIGNED);
- __ jmp(&return_r0);
-
- // Allocate and copy the resulting two-byte string.
- __ bind(&two_byte_sequential);
- __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
-
- // Locate first character of substring to copy.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ add(r5, r5, Operand(r3, LSL, 1));
- // Locate first character of result.
- __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r0: result string.
- // r1: first character of result.
- // r2: result length.
- // r5: first character of substring to copy.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(
- masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
-
- __ bind(&return_r0);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
- __ Drop(3);
- __ Ret();
-
- // Just jump to runtime to create the substring.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-
- __ bind(&single_char);
- // r0: original string
- // r1: instance type
- // r2: length
- // r3: from index (untagged)
- __ SmiTag(r3, r3);
- StringCharAtGenerator generator(
- r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ Drop(3);
- __ Ret();
- generator.SkipSlow(masm, &runtime);
-}
-
-
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
- __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ cmp(length, scratch2);
- __ b(eq, &check_zero_length);
- __ bind(&strings_not_equal);
- __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
- __ Ret();
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(length, Operand::Zero());
- __ b(ne, &compare_chars);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
-
- // Compare characters.
- __ bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm,
- left, right, length, scratch2, scratch3,
- &strings_not_equal);
-
- // Characters are equal.
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ Ret();
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- Label result_not_equal, compare_lengths;
- // Find minimum length and length difference.
- __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
- __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
- Register length_delta = scratch3;
- __ mov(scratch1, scratch2, LeaveCC, gt);
- Register min_length = scratch1;
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(min_length, Operand::Zero());
- __ b(eq, &compare_lengths);
-
- // Compare loop.
- GenerateAsciiCharsCompareLoop(masm,
- left, right, min_length, scratch2, scratch4,
- &result_not_equal);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
- // Use length_delta as result if it's zero.
- __ mov(r0, Operand(length_delta), SetCC);
- __ bind(&result_not_equal);
- // Conditionally update the result based either on length_delta or
- // on the last comparison performed in the loop above.
- __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
- __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
- __ Ret();
-}
-
-
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that the loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ add(scratch1, length,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ add(left, left, Operand(scratch1));
- __ add(right, right, Operand(scratch1));
- __ rsb(length, length, Operand::Zero());
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ ldrb(scratch1, MemOperand(left, index));
- __ ldrb(scratch2, MemOperand(right, index));
- __ cmp(scratch1, scratch2);
- __ b(ne, chars_not_equal);
- __ add(index, index, Operand(1), SetCC);
- __ b(ne, &loop);
-}
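-
-// Roughly equivalent C for the loop above (a sketch, not part of the
-// original source):
-//   const uint8_t* lp = left_chars + length;   // one past the last char
-//   const uint8_t* rp = right_chars + length;
-//   for (int index = -length; index != 0; index++)
-//     if (lp[index] != rp[index]) goto chars_not_equal;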
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[4]: left string
- __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1.
-
- Label not_same;
- __ cmp(r0, r1);
- __ b(ne, &not_same);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
-
- // Compare flat ASCII strings natively. Remove arguments from stack first.
- __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- Counters* counters = masm->isolate()->counters();
-
- // Stack on entry:
- // sp[0]: second argument (right).
- // sp[4]: first argument (left).
-
- // Load the two arguments.
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfEitherSmi(r0, r1, &call_runtime);
- // Load instance types.
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kStringTag == 0);
- // If either is not a string, go to runtime.
- __ tst(r4, Operand(kIsNotStringMask));
- __ tst(r5, Operand(kIsNotStringMask), eq);
- __ b(ne, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(
- masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
- }
-
- // Both arguments are strings.
- // r0: first string
- // r1: second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- {
- Label strings_not_empty;
- // Check if either of the strings is empty. In that case return the other.
- __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
- __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
- __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
- STATIC_ASSERT(kSmiTag == 0);
- // Else test if second string is empty.
- __ cmp(r3, Operand(Smi::FromInt(0)), ne);
- __ b(ne, &strings_not_empty); // If either string was empty, return r0.
-
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&strings_not_empty);
- }
-
- __ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ mov(r3, Operand(r3, ASR, kSmiTagSize));
- // Both strings are non-empty.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- // Adding two lengths can't overflow.
- STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
- __ add(r6, r2, Operand(r3));
- // Use the string table when adding two one-character strings, as
- // returning a string-table entry here helps later optimizations.
- __ cmp(r6, Operand(2));
- __ b(ne, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
-
- // Try to look up the two-character string in the string table. If it is
- // not found, just allocate a new one.
- Label make_two_character_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&make_two_character_string);
- // The resulting string has length 2, and the first characters of the two
- // strings have been combined into a single halfword in register r2. So we
- // can fill the resulting string with a single halfword store instead of
- // two loops (which assumes the processor is in little-endian mode).
- __ mov(r6, Operand(2));
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(r6, Operand(ConsString::kMinLength));
- __ b(lt, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- ASSERT(IsPowerOf2(String::kMaxLength + 1));
- // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
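- // (An ARM data-processing immediate is an 8-bit value rotated right by an
- // even amount, so the single-bit value kMaxLength + 1 encodes directly.)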
- __ cmp(r6, Operand(String::kMaxLength + 1));
- __ b(hs, &call_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII the result is an ASCII cons string.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
- Label non_ascii, allocated, ascii_data;
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r4, Operand(kStringEncodingMask));
- __ tst(r5, Operand(kStringEncodingMask), ne);
- __ b(eq, &non_ascii);
-
- // Allocate an ASCII cons string.
- __ bind(&ascii_data);
- __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
- __ mov(r0, Operand(r7));
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
- // r4: first instance type.
- // r5: second instance type.
- __ tst(r4, Operand(kAsciiDataHintMask));
- __ tst(r5, Operand(kAsciiDataHintMask), ne);
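- // Predication makes the branch below trigger only when both strings
- // carry the ASCII data hint.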
- __ b(ne, &ascii_data);
- __ eor(r4, r4, Operand(r5));
- STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(r4, r4, Operand(kOneByteStringTag | kAsciiDataHintTag));
- __ cmp(r4, Operand(kOneByteStringTag | kAsciiDataHintTag));
- __ b(eq, &ascii_data);
-
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // r0: first string
- // r1: second string
- // r2: length of first string
- // r3: length of second string
- // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
- // r6: sum of lengths.
- Label first_prepared, second_prepared;
- __ bind(&string_add_flat_result);
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- }
-
- // Check whether both strings have same encoding
- __ eor(r7, r4, Operand(r5));
- __ tst(r7, Operand(kStringEncodingMask));
- __ b(ne, &call_runtime);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r4, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
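- // For a sequential string (eq) the first character follows the header;
- // the predicated add computes its address and the branch below skips the
- // external-string handling.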
- __ add(r7,
- r0,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
- LeaveCC,
- eq);
- __ b(eq, &first_prepared);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r4, Operand(kShortExternalStringMask));
- __ b(ne, &call_runtime);
- __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
- __ bind(&first_prepared);
-
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r5, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r1,
- r1,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
- LeaveCC,
- eq);
- __ b(eq, &second_prepared);
- // External string: rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ tst(r5, Operand(kShortExternalStringMask));
- __ b(ne, &call_runtime);
- __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // r7: first character of first string
- // r1: first character of second string
- // r2: length of first string.
- // r3: length of second string.
- // r6: sum of lengths.
- // Both strings have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(r5, Operand(kStringEncodingMask));
- __ b(eq, &non_ascii_string_add_flat_result);
-
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // r0: result string.
- // r7: first character of first string.
- // r1: first character of second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- __ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // r0: result string.
- // r7: first character of first string.
- // r1: first character of second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
- __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
-
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
- __ b(lt, &done);
-
- // Check the number to string cache.
- Label not_cached;
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- false,
- &not_cached);
- __ mov(arg, scratch1);
- __ str(arg, MemOperand(sp, stack_offset));
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CompareObjectType(
- arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
- __ b(ne, slow);
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ and_(scratch2,
- scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ cmp(scratch2,
- Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, slow);
- __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
- __ str(arg, MemOperand(sp, stack_offset));
-
- __ bind(&done);
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
- Label miss;
- __ orr(r2, r1, r0);
- __ JumpIfNotSmi(r2, &miss);
-
- if (GetCondition() == eq) {
- // For equality we do not care about the sign of the result.
- __ sub(r0, r0, r1, SetCC);
- } else {
- // Untag before subtracting to avoid handling overflow.
- __ SmiUntag(r1);
- __ sub(r0, r1, SmiUntagOperand(r0));
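- // Untagged smis fit in 31 bits, so this 32-bit subtraction cannot
- // overflow and its sign is reliable for ordering.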
- }
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::NUMBER);
-
- Label generic_stub;
- Label unordered, maybe_undefined1, maybe_undefined2;
- Label miss;
-
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(r1, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(r0, &miss);
- }
-
- // Inline the double comparison, falling back to the general compare
- // stub if NaN is involved or VFP2 is unsupported.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
-
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(r0, &right_smi);
- __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vldr(d1, r2, HeapNumber::kValueOffset);
- __ b(&left);
- __ bind(&right_smi);
- __ SmiUntag(r2, r0); // Can't clobber r0 yet.
- SwVfpRegister single_scratch = d2.low();
- __ vmov(single_scratch, r2);
- __ vcvt_f64_s32(d1, single_scratch);
-
- __ bind(&left);
- __ JumpIfSmi(r1, &left_smi);
- __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
- __ sub(r2, r1, Operand(kHeapObjectTag));
- __ vldr(d0, r2, HeapNumber::kValueOffset);
- __ b(&done);
- __ bind(&left_smi);
- __ SmiUntag(r2, r1); // Can't clobber r1 yet.
- single_scratch = d3.low();
- __ vmov(single_scratch, r2);
- __ vcvt_f64_s32(d0, single_scratch);
-
- __ bind(&done);
- // Compare operands.
- __ VFPCompareAndSetFlags(d0, d1);
-
- // Don't base result on status bits when a NaN is involved.
- __ b(vs, &unordered);
-
- // Return a result of -1, 0, or 1, based on status bits.
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
- }
-
- __ bind(&unordered);
- __ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
-
- __ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &miss);
- __ JumpIfSmi(r1, &unordered);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined2);
- __ jmp(&unordered);
- }
-
- __ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
- __ b(eq, &unordered);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
- Label miss;
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
- Register tmp1 = r2;
- Register tmp2 = r3;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(left, right, &miss);
-
- // Check that both operands are internalized strings.
- __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp1, Operand(tmp2));
- __ tst(tmp1, Operand(kIsInternalizedMask));
- __ b(eq, &miss);
-
- // Internalized strings are compared by identity.
- __ cmp(left, right);
- // Make sure r0 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(r0));
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::UNIQUE_NAME);
- ASSERT(GetCondition() == eq);
- Label miss;
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
- Register tmp1 = r2;
- Register tmp2 = r3;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(left, right, &miss);
-
- // Check that both operands are unique names. This leaves the instance
- // types loaded in tmp1 and tmp2.
- STATIC_ASSERT(kInternalizedTag != 0);
- __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
-
- Label succeed1;
- __ tst(tmp1, Operand(kIsInternalizedMask));
- __ b(ne, &succeed1);
- __ cmp(tmp1, Operand(SYMBOL_TYPE));
- __ b(ne, &miss);
- __ bind(&succeed1);
-
- Label succeed2;
- __ tst(tmp2, Operand(kIsInternalizedMask));
- __ b(ne, &succeed2);
- __ cmp(tmp2, Operand(SYMBOL_TYPE));
- __ b(ne, &miss);
- __ bind(&succeed2);
-
- // Unique names are compared by identity.
- __ cmp(left, right);
- // Make sure r0 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(r0));
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
- Label miss;
-
- bool equality = Token::IsEqualityOp(op_);
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
- Register tmp1 = r2;
- Register tmp2 = r3;
- Register tmp3 = r4;
- Register tmp4 = r5;
-
- // Check that both operands are heap objects.
- __ JumpIfEitherSmi(left, right, &miss);
-
- // Check that both operands are strings. This leaves the instance
- // types loaded in tmp1 and tmp2.
- __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ orr(tmp3, tmp1, tmp2);
- __ tst(tmp3, Operand(kIsNotStringMask));
- __ b(ne, &miss);
-
- // Fast check for identical strings.
- __ cmp(left, right);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
- __ Ret(eq);
-
- // Handle not identical strings.
-
- // Check that both strings are internalized strings. If they are, we're done
- // because we already know they are not identical.
- if (equality) {
- ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp3, tmp1, Operand(tmp2));
- __ tst(tmp3, Operand(kIsInternalizedMask));
- // Make sure r0 is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(r0));
- __ Ret(ne);
- }
-
- // Check that both strings are sequential ASCII.
- Label runtime;
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(
- tmp1, tmp2, tmp3, tmp4, &runtime);
-
- // Compare flat ASCII strings. Returns when done.
- if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2, tmp3);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3, tmp4);
- }
-
- // Handle more complex cases in runtime.
- __ bind(&runtime);
- __ Push(left, right);
- if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
- } else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
- Label miss;
- __ and_(r2, r1, Operand(r0));
- __ JumpIfSmi(r2, &miss);
-
- __ CompareObjectType(r0, r2, r3, JS_OBJECT_TYPE);
- __ b(ne, &miss);
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &miss);
- __ CompareObjectType(r1, r2, r3, JS_OBJECT_TYPE);
- __ b(ne, &miss);
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &miss);
-
- ASSERT(GetCondition() == eq);
- __ sub(r0, r0, Operand(r1));
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
- Label miss;
- __ and_(r2, r1, Operand(r0));
- __ JumpIfSmi(r2, &miss);
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r2, Operand(known_map_));
- __ b(ne, &miss);
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &miss);
- __ cmp(r3, Operand(known_map_));
- __ b(ne, &miss);
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitField2Offset));
- __ and_(r3, r3, Operand(1 << Map::kUseUserObjectComparison));
- __ cmp(r3, Operand(1 << Map::kUseUserObjectComparison));
- __ b(eq, &miss);
-
- __ sub(r0, r0, Operand(r1));
- __ Ret();
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- {
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1, r0);
- __ push(lr);
- __ Push(r1, r0);
- __ mov(ip, Operand(Smi::FromInt(op_)));
- __ push(ip);
- __ CallExternalReference(miss, 3);
- // Compute the entry point of the rewritten stub.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore registers.
- __ pop(lr);
- __ pop(r0);
- __ pop(r1);
- }
-
- __ Jump(r2);
-}
-
-
-void DirectCEntryStub::Generate(MacroAssembler* masm) {
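- // GenerateCall stored the return address at sp[0]; loading it into pc
- // returns there after the C++ call completes.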
- __ ldr(pc, MemOperand(sp, 0));
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- ExternalReference function) {
- __ mov(r2, Operand(function));
- GenerateCall(masm, r2);
-}
-
-
-void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
- Register target) {
- intptr_t code =
- reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
- __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
-
- // Prevent literal pool emission during calculation of return address.
- Assembler::BlockConstPoolScope block_const_pool(masm);
-
- // Push return address (accessible to GC through exit frame pc).
- // Note that using pc with str is deprecated.
- Label start;
- __ bind(&start);
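- // Reading pc yields the address of the current instruction plus 8
- // (kPcLoadDelta), so ip points just past the Jump below.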
- __ add(ip, pc, Operand(Assembler::kInstrSize));
- __ str(ip, MemOperand(sp, 0));
- __ Jump(target); // Call the C++ function.
- ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta,
- masm->SizeOfCodeGeneratedSince(&start));
-}
-
-
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<String> name,
- Register scratch0) {
- // If the names in the slots probed for the hash value (probes 1 through
- // kProbes - 1) are not equal to the name, and the kProbes-th slot is
- // unused (its name is the undefined value), the hash table is guaranteed
- // not to contain the property. This holds even if some slots represent
- // deleted properties (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // scratch0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = scratch0;
- // Capacity is smi 2^n.
- __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
- __ sub(index, index, Operand(1));
- __ and_(index, index, Operand(
- Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- Register entity_name = scratch0;
- // Finding undefined in this slot means the name is not in the dictionary.
- ASSERT_EQ(kSmiTagSize, 1);
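- // index is still a smi (value * 2), so one more left shift scales the
- // untagged value by kPointerSize.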
- Register tmp = properties;
- __ add(tmp, properties, Operand(index, LSL, 1));
- __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
-
- ASSERT(!tmp.is(entity_name));
- __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
- __ cmp(entity_name, tmp);
- __ b(eq, done);
-
- if (i != kInlinedProbes - 1) {
- // Load the hole ready for use below:
- __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
-
- // Stop if found the property.
- __ cmp(entity_name, Operand(Handle<String>(name)));
- __ b(eq, miss);
-
- Label the_hole;
- __ cmp(entity_name, tmp);
- __ b(eq, &the_hole);
-
- // Check if the entry name is not an internalized string.
- __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ ldrb(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ tst(entity_name, Operand(kIsInternalizedMask));
- __ b(eq, miss);
-
- __ bind(&the_hole);
-
- // Restore the properties.
- __ ldr(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- }
- }
-
- const int spill_mask =
- (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
- r2.bit() | r1.bit() | r0.bit());
-
- __ stm(db_w, sp, spill_mask);
- __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ mov(r1, Operand(Handle<String>(name)));
- StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
- __ CallStub(&stub);
- __ cmp(r0, Operand::Zero());
- __ ldm(ia_w, sp, spill_mask);
-
- __ b(eq, done);
- __ b(ne, miss);
-}
-
-
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If the lookup was successful, |scratch2| will be equal to elements + 4 * index.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- ASSERT(!elements.is(scratch1));
- ASSERT(!elements.is(scratch2));
- ASSERT(!name.is(scratch1));
- ASSERT(!name.is(scratch2));
-
- __ AssertString(name);
-
- // Compute the capacity mask.
- __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int
- __ sub(scratch1, scratch1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i), left shifted, to avoid right
- // shifting the hash in a separate instruction. The value hash + i + i * i
- // is right shifted in the 'and' instruction that follows.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
- __ add(scratch2, scratch2, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
- }
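- // The shifted operand below applies the hash shift and the capacity
- // mask in a single instruction.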
- __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
-
- // Scale the index by multiplying by the element size.
- ASSERT(StringDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
- __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
- // Check if the key is identical to the name.
- __ add(scratch2, elements, Operand(scratch2, LSL, 2));
- __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
- __ cmp(name, Operand(ip));
- __ b(eq, done);
- }
-
- const int spill_mask =
- (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
- r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
- ~(scratch1.bit() | scratch2.bit());
-
- __ stm(db_w, sp, spill_mask);
- if (name.is(r0)) {
- ASSERT(!elements.is(r1));
- __ Move(r1, name);
- __ Move(r0, elements);
- } else {
- __ Move(r0, elements);
- __ Move(r1, name);
- }
- StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ cmp(r0, Operand::Zero());
- __ mov(scratch2, Operand(r2));
- __ ldm(ia_w, sp, spill_mask);
-
- __ b(ne, done);
- __ b(eq, miss);
-}
-
-
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Registers:
- // dictionary: r0, the StringDictionary to probe (aliases result).
- // key: r1.
- // index: r2, will hold the index of the entry if the lookup succeeds.
- // Returns:
- // result: r0, zero if the lookup failed, non-zero otherwise.
-
- Register result = r0;
- Register dictionary = r0;
- Register key = r1;
- Register index = r2;
- Register mask = r3;
- Register hash = r4;
- Register undefined = r5;
- Register entry_key = r6;
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
- __ mov(mask, Operand(mask, ASR, kSmiTagSize));
- __ sub(mask, mask, Operand(1));
-
- __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
-
- __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
-
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- // Capacity is smi 2^n.
- if (i > 0) {
- // Add the probe offset (i + i * i), left shifted, to avoid right
- // shifting the hash in a separate instruction. The value hash + i + i * i
- // is right shifted in the 'and' instruction that follows.
- ASSERT(StringDictionary::GetProbeOffset(i) <
- 1 << (32 - String::kHashFieldOffset));
- __ add(index, hash, Operand(
- StringDictionary::GetProbeOffset(i) << String::kHashShift));
- } else {
- __ mov(index, Operand(hash));
- }
- __ and_(index, mask, Operand(index, LSR, String::kHashShift));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ add(index, index, Operand(index, LSL, 1)); // index *= 3.
-
- ASSERT_EQ(kSmiTagSize, 1);
- __ add(index, dictionary, Operand(index, LSL, 2));
- __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
-
- // Finding undefined in this slot means the name is not in the dictionary.
- __ cmp(entry_key, Operand(undefined));
- __ b(eq, &not_in_dictionary);
-
- // Stop if found the property.
- __ cmp(entry_key, Operand(key));
- __ b(eq, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // Check if the entry name is not an internalized string.
- __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ ldrb(entry_key,
- FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ tst(entry_key, Operand(kIsInternalizedMask));
- __ b(eq, &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing a negative lookup, then probing failure should be
- // treated as a lookup success. For a positive lookup, probing failure
- // should be treated as a lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
- __ mov(result, Operand::Zero());
- __ Ret();
- }
-
- __ bind(&in_dictionary);
- __ mov(result, Operand(1));
- __ Ret();
-
- __ bind(&not_in_dictionary);
- __ mov(result, Operand::Zero());
- __ Ret();
-}
-
-
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
- { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
- { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
- // StoreArrayLiteralElementStub::Generate
- { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
- // FastNewClosureStub::Generate
- { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-
-bool RecordWriteStub::IsPregenerated() {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-bool StoreBufferOverflowStub::IsPregenerated() {
- return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return CpuFeatures::IsSupported(VFP2);
-}
-
-
-// Takes the input in 3 registers: address_, value_ and object_. A pointer
-// to the value has just been written into the object; now this stub makes
-// sure we keep the GC informed. The word in the object where the value
-// has been written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
- // forth between a compare instruction (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- // See RecordWriteStub::Patch for details.
- {
- // Block literal pool emission, as the position of these two instructions
- // is assumed by the patching code.
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ b(&skip_to_incremental_noncompacting);
- __ b(&skip_to_incremental_compacting);
- }
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- }
- __ Ret();
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
- ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
- PatchBranchIntoNop(masm, 0);
- PatchBranchIntoNop(masm, Assembler::kInstrSize);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- Register address =
- r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
- ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(r0));
- __ Move(address, regs_.address());
- __ Move(r0, regs_.object());
- __ Move(r1, address);
- __ mov(r2, Operand(ExternalReference::isolate_address()));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label on_black;
- Label need_incremental;
- Label need_incremental_pop_scratch;
-
- __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
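- // Masking off the low bits of the object address yields the start of its
- // page, where the MemoryChunk header (and the write barrier counter)
- // lives.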
- __ ldr(regs_.scratch1(),
- MemOperand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
- __ str(regs_.scratch1(),
- MemOperand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ b(mi, &need_incremental);
-
- // Look at the color of the object: if it is not black, we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ bind(&on_black);
-
- // Get the value from the slot.
- __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- eq,
- &ensure_not_white);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- eq,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need extra registers for this, so we push the object and the
- // address registers temporarily.
- __ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ Ret();
- }
-
- __ bind(&need_incremental_pop_scratch);
- __ Pop(regs_.object(), regs_.address());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : element value to store
- // -- r1 : array literal
- // -- r2 : map of array literal
- // -- r3 : element index as smi
- // -- r4 : array literal index in function as smi
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- __ CheckFastElements(r2, r5, &double_elements);
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
- __ JumpIfSmi(r0, &smi_element);
- __ CheckFastSmiElements(r2, r5, &fast_elements);
-
- // Storing into the array literal requires an elements transition. Call
- // into the runtime.
- __ bind(&slow_elements);
- __ Push(r1, r3, r0);
- __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
- __ Push(r5, r4);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
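- // r3 is a smi, so shifting by kPointerSizeLog2 - kSmiTagSize scales the
- // untagged index by kPointerSize.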
- __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(r0, MemOperand(r6, 0));
- // Update the write barrier for the array store.
- __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
- __ Ret();
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3,
- // Overwrites all regs after this.
- r5, r6, r7, r9, r2,
- &slow_elements);
- __ Ret();
-}
-
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- ASSERT(!Serializer::enabled());
- bool save_fp_regs = CpuFeatures::IsSupported(VFP2);
- CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ ldr(r1, MemOperand(fp, parameter_count_offset));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
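- // Convert the parameter count into a byte offset so the caller's stack
- // parameters can be dropped below.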
- __ add(sp, sp, r1);
- __ Ret();
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
- PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
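- // The scope pins this push/call/pop sequence at exactly four
- // instructions, keeping kReturnAddressDistanceFromFunctionStart valid.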
- ProfileEntryHookStub stub;
- __ push(lr);
- __ CallStub(&stub);
- __ pop(lr);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // The entry hook is a "push lr" instruction, followed by a call.
- const int32_t kReturnAddressDistanceFromFunctionStart =
- 3 * Assembler::kInstrSize;
-
- // Save live volatile registers.
- __ Push(lr, r5, r1);
- const int32_t kNumSavedRegs = 3;
-
- // Compute the function's address for the first argument.
- __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
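- // lr holds the return address inside the hooked function's entry
- // sequence, a known distance past the function start.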
-
- // The caller's return address is above the saved temporaries.
- // Grab that for the second argument to the hook.
- __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
-
- // Align the stack if necessary.
- int frame_alignment = masm->ActivationFrameAlignment();
- if (frame_alignment > kPointerSize) {
- __ mov(r5, sp);
- ASSERT(IsPowerOf2(frame_alignment));
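- // -frame_alignment is a mask with the low bits clear, so the and_ rounds
- // sp down to an aligned address.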
- __ and_(sp, sp, Operand(-frame_alignment));
- }
-
-#if defined(V8_HOST_ARCH_ARM)
- __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_)));
- __ ldr(ip, MemOperand(ip));
-#else
- // Under the simulator we need to indirect the entry hook through a
- // trampoline function at a known address.
- Address trampoline_address = reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(EntryHookTrampoline));
- ApiFunction dispatcher(trampoline_address);
- __ mov(ip, Operand(ExternalReference(&dispatcher,
- ExternalReference::BUILTIN_CALL,
- masm->isolate())));
-#endif
- __ Call(ip);
-
- // Restore the stack pointer if needed.
- if (frame_alignment > kPointerSize) {
- __ mov(sp, r5);
- }
-
- __ Pop(lr, r5, r1);
- __ Ret();
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/code-stubs-arm.h b/src/3rdparty/v8/src/arm/code-stubs-arm.h
deleted file mode 100644
index f952756..0000000
--- a/src/3rdparty/v8/src/arm/code-stubs-arm.h
+++ /dev/null
@@ -1,800 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_CODE_STUBS_ARM_H_
-#define V8_ARM_CODE_STUBS_ARM_H_
-
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) { }
- void Generate(MacroAssembler* masm);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
- void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
-class UnaryOpStub: public PlatformCodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use
- // virtual functions instead of switches more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
- void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateNumberStubSub(MacroAssembler* masm);
- void GenerateNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersLong adds too much
- // overhead. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- int flags);
-
-
- // Probe the string table for a two-character string. If the string is
- // not found by probing, a jump to the label not_found is performed; this
- // jump does not guarantee that the string is not in the string table. If
- // the string is found, the code falls through with the string in
- // register r0. The contents of both the c1 and c2 registers are
- // modified. At the exit, c1 is guaranteed to contain a halfword with its
- // low and high bytes equal to the initial contents of c1 and c2.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character);
-
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* slow);
-
- const StringAddFlags flags_;
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- StringCompareStub() { }
-
- // Compares two flat ASCII strings and returns result in r0.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compares two flat ASCII strings for equality and returns result
- // in r0.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- virtual Major MajorKey() { return StringCompare; }
- virtual int MinorKey() { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal);
-};
-
-
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this
-// stub, so you don't have to set up a frame.
-class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
- public:
- WriteInt32ToHeapNumberStub(Register the_int,
- Register the_heap_number,
- Register scratch)
- : the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch) { }
-
- bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-
- private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
-
- // Minor key encoding in 16 bits.
- class IntRegisterBits: public BitField<int, 0, 4> {};
- class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
- class ScratchRegisterBits: public BitField<int, 8, 4> {};
-
- Major MajorKey() { return WriteInt32ToHeapNumber; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return IntRegisterBits::encode(the_int_.code())
- | HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code());
- }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number
- // in register |object| is found in the cache, the generated code falls
- // through with the result in the |result| register. The |object| and
- // |result| registers can be the same. If the number is not found in the
- // cache, the code jumps to the label |not_found|, leaving the content of
- // register |object| unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- }
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
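- // Clearing B27 and setting B24 | B20 rewrites the branch encoding into a
- // tst-immediate (a nop at this position); PatchNopIntoBranch reverses it.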
- static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
- ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
- }
-
- static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
- ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
- }
-
- static Mode GetMode(Code* stub) {
- Instr first_instruction = Assembler::instr_at(stub->instruction_start());
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- Assembler::kInstrSize);
-
- if (Assembler::IsBranch(first_instruction)) {
- return INCREMENTAL;
- }
-
- ASSERT(Assembler::IsTstImmediate(first_instruction));
-
- if (Assembler::IsBranch(second_instruction)) {
- return INCREMENTAL_COMPACTION;
- }
-
- ASSERT(Assembler::IsTstImmediate(second_instruction));
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL,
- stub->instruction_start(),
- stub->instruction_size());
- switch (mode) {
- case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- PatchBranchIntoNop(&masm, 0);
- PatchBranchIntoNop(&masm, Assembler::kInstrSize);
- break;
- case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 0);
- break;
- case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, Assembler::kInstrSize);
- break;
- }
- ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
- }
-
- private:
- // This is a helper class for freeing up 3 scratch registers. The input is
- // two registers that must be preserved and one scratch register provided by
- // the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
- : object_(object),
- address_(address),
- scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
- }
-
- void Save(MacroAssembler* masm) {
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
- // We don't have to save scratch0_ because it was given to us as
- // a scratch register.
- masm->push(scratch1_);
- }
-
- void Restore(MacroAssembler* masm) {
- masm->pop(scratch1_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The scratch registers
- // will be restored by other means so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
- if (mode == kSaveFPRegs) {
- // Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
- CpuFeatures::Scope scope(VFP2);
- masm->sub(sp,
- sp,
- Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
- // Save all VFP registers except d0.
- // TODO(hans): We should probably save d0 too. And maybe use vstm.
- for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
- }
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- // Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
- CpuFeatures::Scope scope(VFP2);
- // Restore all VFP registers except d0.
- // TODO(hans): We should probably restore d0 too. And maybe use vldm.
- for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
- DwVfpRegister reg = DwVfpRegister::from_code(i);
- masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
- }
- masm->add(sp,
- sp,
- Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
- }
- masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
-
- Register GetRegThatIsNotOneOf(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
- }
-
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- class ObjectBits: public BitField<int, 0, 4> {};
- class ValueBits: public BitField<int, 4, 4> {};
- class AddressBits: public BitField<int, 8, 4> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
-
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
- Label slow_;
- RegisterAllocation regs_;
-};
-
-
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public PlatformCodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
-
-// Trampoline stub to call into native code. To call safely into native code
-// in the presence of a compacting GC (which can move code objects), we need
-// to keep the code that called into native code pinned in memory. Currently
-// the simplest approach is to generate such a stub early enough that it can
-// never be moved by the GC.
-class DirectCEntryStub: public PlatformCodeStub {
- public:
- DirectCEntryStub() {}
- void Generate(MacroAssembler* masm);
- void GenerateCall(MacroAssembler* masm, ExternalReference function);
- void GenerateCall(MacroAssembler* masm, Register target);
-
- private:
- Major MajorKey() { return DirectCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- enum Destination {
- kVFPRegisters,
- kCoreRegisters
- };
-
-
- // Loads smis from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination, the values end up
- // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the destination
- // is floating point registers, VFP3 must be supported. If core registers are
- // requested when VFP3 is supported, d6 and d7 will be scratched.
- static void LoadSmis(MacroAssembler* masm,
- Destination destination,
- Register scratch1,
- Register scratch2);
-
- // Convert the smi or heap number in object to an int32 using the rules
- // for ToInt32 as described in ECMAScript section 9.5: the value is
- // truncated and brought into the range -2^31 .. +2^31 - 1.
- static void ConvertNumberToInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch,
- Label* not_int32);
-
- // Converts the integer (untagged smi) in |int_scratch| to a double, storing
- // the result either in |double_dst| or |dst2:dst1|, depending on
- // |destination|.
- // Warning: The value in |int_scratch| will be changed in the process!
- static void ConvertIntToDouble(MacroAssembler* masm,
- Register int_scratch,
- Destination destination,
- DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
- Register scratch2,
- SwVfpRegister single_scratch);
-
- // Load the number from object into double_dst in the double format.
- // Control will jump to not_int32 if the value cannot be exactly represented
- // by a 32-bit integer.
- // Floating point values in the 32-bit integer range that are not exact
- // integers won't be loaded.
- static void LoadNumberAsInt32Double(MacroAssembler* masm,
- Register object,
- Destination destination,
- DwVfpRegister double_dst,
- DwVfpRegister double_scratch,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- SwVfpRegister single_scratch,
- Label* not_int32);
-
- // Loads the number from object into dst as a 32-bit integer.
- // Control will jump to not_int32 if the object cannot be exactly represented
- // by a 32-bit integer.
- // Floating point values in the 32-bit integer range that are not exact
- // integers won't be converted.
- // scratch3 is not used when VFP3 is supported.
- static void LoadNumberAsInt32(MacroAssembler* masm,
- Register object,
- Register dst,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
- Label* not_int32);
-
- // Generate non-VFP3 code to check if a double can be exactly represented
- // by a 32-bit integer. This does not check for 0 or -0, which need
- // to be checked for separately.
- // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
- // through otherwise.
- // src1 and src2 will be clobbered.
- //
- // Expected input:
- // - src1: higher (exponent) part of the double value.
- // - src2: lower (mantissa) part of the double value.
- // Output status:
- // - dst: the higher 32 bits of the mantissa (mantissa[51:20]).
- // - src2: contains 1.
- // - other registers are clobbered.
- static void DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32);
-
- // Generates code to call a C function to do a double operation using core
- // registers. (Used when VFP3 is not supported.)
- // This code never falls through, but returns with a heap number containing
- // the result in r0.
- // Register heapnumber_result must be a heap number in which the
- // result of the operation will be stored.
- // Requires the following layout on entry:
- // r0: Left value (least significant part of mantissa).
- // r1: Left value (sign, exponent, top of mantissa).
- // r2: Right value (least significant part of mantissa).
- // r3: Right value (sign, exponent, top of mantissa).
- static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch);
-
- // Loads the number from |object| into floating point registers.
- // Depending on |destination| the value ends up either in |dst| or
- // in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3
- // must be supported. If kCoreRegisters are requested and VFP3 is
- // supported, |dst| will be scratched. If |object| is neither smi nor
- // heap number, |not_number| is jumped to with |object| still intact.
- static void LoadNumber(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register object,
- DwVfpRegister dst,
- Register dst1,
- Register dst2,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-};
-
-
-class StringDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register receiver,
- Register properties,
- Handle<String> name,
- Register scratch0);
-
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- Major MajorKey() { return StringDictionaryLookup; }
-
- int MinorKey() {
- return LookupModeBits::encode(mode_);
- }
-
- class LookupModeBits: public BitField<LookupMode, 0, 1> {};
-
- LookupMode mode_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/codegen-arm.cc b/src/3rdparty/v8/src/arm/codegen-arm.cc
deleted file mode 100644
index 6e3c635..0000000
--- a/src/3rdparty/v8/src/arm/codegen-arm.cc
+++ /dev/null
@@ -1,708 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen.h"
-#include "macro-assembler.h"
-#include "simulator-arm.h"
-
-namespace v8 {
-namespace internal {
-
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- return NULL;
-}
-
-
-#define __ masm.
-
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_arm_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
- fast_exp_arm_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(VFP2)) return &exp;
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-
- {
- CpuFeatures::Scope use_vfp(VFP2);
- DwVfpRegister input = d0;
- DwVfpRegister result = d1;
- DwVfpRegister double_scratch1 = d2;
- DwVfpRegister double_scratch2 = d3;
- Register temp1 = r4;
- Register temp2 = r5;
- Register temp3 = r6;
-
- if (masm.use_eabi_hardfloat()) {
- // Input value is in d0 anyway, nothing to do.
- } else {
- __ vmov(input, r0, r1);
- }
- __ Push(temp3, temp2, temp1);
- MathExpGenerator::EmitMathExp(
- &masm, input, result, double_scratch1, double_scratch2,
- temp1, temp2, temp3);
- __ Pop(temp3, temp2, temp1);
- if (masm.use_eabi_hardfloat()) {
- __ vmov(d0, result);
- } else {
- __ vmov(r0, r1, result);
- }
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#else
- fast_exp_arm_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
-
-#undef __
-
-
-UnaryMathFunction CreateSqrtFunction() {
- return &sqrt;
-}
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
- masm->set_has_frame(true);
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
- masm->set_has_frame(false);
-}
-
-
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_site_info_found) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : target map, scratch for subsequent call
- // -- r4 : scratch (elements)
- // -----------------------------------
- if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_site_info_found != NULL);
- __ TestJSArrayForAllocationSiteInfo(r2, r4);
- __ b(eq, allocation_site_info_found);
- }
-
- // Set transitioned map.
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
- HeapObject::kMapOffset,
- r3,
- r9,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : target map, scratch for subsequent call
- // -- r4 : scratch (elements)
- // -----------------------------------
- Label loop, entry, convert_hole, gc_required, only_change_map, done;
- bool vfp2_supported = CpuFeatures::IsSupported(VFP2);
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(r2, r4);
- __ b(eq, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
- __ b(eq, &only_change_map);
-
- __ push(lr);
- __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: source FixedArray
- // r5: number of elements (smi-tagged)
-
- // Allocate new FixedDoubleArray.
- // Use lr as a temporary register.
- __ mov(lr, Operand(r5, LSL, 2));
- __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
- __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
- // r6: destination FixedDoubleArray, not tagged as heap object.
-
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
- __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
- // Update receiver's map.
- __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
-
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
- HeapObject::kMapOffset,
- r3,
- r9,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ add(r3, r6, Operand(kHeapObjectTag));
- __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ RecordWriteField(r2,
- JSObject::kElementsOffset,
- r3,
- r9,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Prepare for conversion loop.
- __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
- __ add(r6, r7, Operand(r5, LSL, 2));
- __ mov(r4, Operand(kHoleNanLower32));
- __ mov(r5, Operand(kHoleNanUpper32));
- // r3: begin of source FixedArray element fields, not tagged
- // r4: kHoleNanLower32
- // r5: kHoleNanUpper32
- // r6: end of destination FixedDoubleArray, not tagged
- // r7: begin of FixedDoubleArray element fields, not tagged
- if (!vfp2_supported) __ Push(r1, r0);
-
- __ b(&entry);
-
- __ bind(&only_change_map);
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
- HeapObject::kMapOffset,
- r3,
- r9,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ b(&done);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ pop(lr);
- __ b(fail);
-
- // Convert and copy elements.
- __ bind(&loop);
- __ ldr(r9, MemOperand(r3, 4, PostIndex));
- // r9: current element
- __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
-
- // Normal smi, convert to double and store.
- if (vfp2_supported) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(s0, r9);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, r7, 0);
- __ add(r7, r7, Operand(8));
- } else {
- FloatingPointHelper::ConvertIntToDouble(masm,
- r9,
- FloatingPointHelper::kCoreRegisters,
- d0,
- r0,
- r1,
- lr,
- s0);
- __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
- }
- __ b(&entry);
-
- // Hole found, store the-hole NaN.
- __ bind(&convert_hole);
- if (FLAG_debug_code) {
- // Restore a "smi-untagged" heap object.
- __ SmiTag(r9);
- __ orr(r9, r9, Operand(1));
- __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "object found in smi-only array");
- }
- __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
-
- __ bind(&entry);
- __ cmp(r7, r6);
- __ b(lt, &loop);
-
- if (!vfp2_supported) __ Pop(r1, r0);
- __ pop(lr);
- __ bind(&done);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : target map, scratch for subsequent call
- // -- r4 : scratch (elements)
- // -----------------------------------
- Label entry, loop, convert_hole, gc_required, only_change_map;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(r2, r4);
- __ b(eq, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
- __ b(eq, &only_change_map);
-
- __ push(lr);
- __ Push(r3, r2, r1, r0);
- __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: source FixedDoubleArray
- // r5: number of elements (smi-tagged)
-
- // Allocate new FixedArray.
- __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
- __ add(r0, r0, Operand(r5, LSL, 1));
- __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
- // r6: destination FixedArray, not tagged as heap object
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
- __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
- __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
-
- // Prepare for conversion loop.
- __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
- __ add(r3, r6, Operand(FixedArray::kHeaderSize));
- __ add(r6, r6, Operand(kHeapObjectTag));
- __ add(r5, r3, Operand(r5, LSL, 1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
- // Using offset addresses in r4 to take full advantage of post-indexing.
- // r3: begin of destination FixedArray element fields, not tagged
- // r4: begin of source FixedDoubleArray element fields, not tagged, +4
- // r5: end of destination FixedArray, not tagged
- // r6: destination FixedArray
- // r7: the-hole pointer
- // r9: heap number map
- __ b(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ Pop(r3, r2, r1, r0);
- __ pop(lr);
- __ b(fail);
-
- __ bind(&loop);
- __ ldr(r1, MemOperand(r4, 8, PostIndex));
- // r1: current element's upper 32 bits
- // r4: address of next element's upper 32 bits
- __ cmp(r1, Operand(kHoleNanUpper32));
- __ b(eq, &convert_hole);
-
- // Non-hole double, copy value into a heap number.
- __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
- // r2: new heap number
- __ ldr(r0, MemOperand(r4, 12, NegOffset));
- __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
- __ mov(r0, r3);
- __ str(r2, MemOperand(r3, 4, PostIndex));
- __ RecordWrite(r6,
- r0,
- r2,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ b(&entry);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ str(r7, MemOperand(r3, 4, PostIndex));
-
- __ bind(&entry);
- __ cmp(r3, r5);
- __ b(lt, &loop);
-
- __ Pop(r3, r2, r1, r0);
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
- __ RecordWriteField(r2,
- JSObject::kElementsOffset,
- r6,
- r9,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(lr);
-
- __ bind(&only_change_map);
- // Update receiver's map.
- __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ RecordWriteField(r2,
- HeapObject::kMapOffset,
- r3,
- r9,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- // Fetch the instance type of the receiver into result register.
- __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ tst(result, Operand(kIsIndirectStringMask));
- __ b(eq, &check_sequential);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ tst(result, Operand(kSlicedNotConsMask));
- __ b(eq, &cons_string);
-
- // Handle slices.
- Label indirect_string_loaded;
- __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ add(index, index, Operand(result, ASR, kSmiTagSize));
- __ jmp(&indirect_string_loaded);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ CompareRoot(result, Heap::kempty_stringRootIndex);
- __ b(ne, call_runtime);
- // Get the first of the two strings and load its instance type.
- __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label external_string, check_encoding;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(result, Operand(kStringRepresentationMask));
- __ b(ne, &external_string);
-
- // Prepare sequential strings.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ add(string,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ jmp(&check_encoding);
-
- // Handle external strings.
- __ bind(&external_string);
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ tst(result, Operand(kIsIndirectStringMask));
- __ Assert(eq, "external string expected, but not found");
- }
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ tst(result, Operand(kShortExternalStringMask));
- __ b(ne, call_runtime);
- __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
- Label ascii, done;
- __ bind(&check_encoding);
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ tst(result, Operand(kStringEncodingMask));
- __ b(ne, &ascii);
- // Two-byte string.
- __ ldrh(result, MemOperand(string, index, LSL, 1));
- __ jmp(&done);
- __ bind(&ascii);
- // Ascii string.
- __ ldrb(result, MemOperand(string, index));
- __ bind(&done);
-}
-
-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ tst(index, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi index");
- __ tst(value, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi value");
-
- __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
- __ cmp(index, ip);
- __ Check(lt, "Index is too large");
-
- __ cmp(index, Operand(Smi::FromInt(0)));
- __ Check(ge, "Index is negative");
-
- __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
-
- __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, "Unexpected string type");
- }
-
- __ add(ip,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ SmiUntag(value, value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
- __ strb(value, MemOperand(ip, index, LSR, 1));
- } else {
- // No need to untag a smi for two-byte addressing.
- __ strh(value, MemOperand(ip, index));
- }
-}
-
-
-static MemOperand ExpConstant(int index, Register base) {
- return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DwVfpRegister input,
- DwVfpRegister result,
- DwVfpRegister double_scratch1,
- DwVfpRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3) {
- ASSERT(!input.is(result));
- ASSERT(!input.is(double_scratch1));
- ASSERT(!input.is(double_scratch2));
- ASSERT(!result.is(double_scratch1));
- ASSERT(!result.is(double_scratch2));
- ASSERT(!double_scratch1.is(double_scratch2));
- ASSERT(!temp1.is(temp2));
- ASSERT(!temp1.is(temp3));
- ASSERT(!temp2.is(temp3));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
-
- __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
- __ vldr(double_scratch1, ExpConstant(0, temp3));
- __ vmov(result, kDoubleRegZero);
- __ VFPCompareAndSetFlags(double_scratch1, input);
- __ b(ge, &done);
- __ vldr(double_scratch2, ExpConstant(1, temp3));
- __ VFPCompareAndSetFlags(input, double_scratch2);
- __ vldr(result, ExpConstant(2, temp3));
- __ b(ge, &done);
- __ vldr(double_scratch1, ExpConstant(3, temp3));
- __ vldr(result, ExpConstant(4, temp3));
- __ vmul(double_scratch1, double_scratch1, input);
- __ vadd(double_scratch1, double_scratch1, result);
- __ vmov(temp2, temp1, double_scratch1);
- __ vsub(double_scratch1, double_scratch1, result);
- __ vldr(result, ExpConstant(6, temp3));
- __ vldr(double_scratch2, ExpConstant(5, temp3));
- __ vmul(double_scratch1, double_scratch1, double_scratch2);
- __ vsub(double_scratch1, double_scratch1, input);
- __ vsub(result, result, double_scratch1);
- __ vmul(input, double_scratch1, double_scratch1);
- __ vmul(result, result, input);
- __ mov(temp1, Operand(temp2, LSR, 11));
- __ vldr(double_scratch2, ExpConstant(7, temp3));
- __ vmul(result, result, double_scratch2);
- __ vsub(result, result, double_scratch1);
- __ vldr(double_scratch2, ExpConstant(8, temp3));
- __ vadd(result, result, double_scratch2);
- __ movw(ip, 0x7ff);
- __ and_(temp2, temp2, Operand(ip));
- __ add(temp1, temp1, Operand(0x3ff));
- __ mov(temp1, Operand(temp1, LSL, 20));
-
- // Must not call ExpConstant() after overwriting temp3!
- __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
- __ add(temp3, temp3, Operand(kPointerSize));
- __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
- __ orr(temp1, temp1, temp2);
- __ vmov(input, ip, temp1);
- __ vmul(result, result, input);
- __ bind(&done);
-}
-
-#undef __
-
-// add(r0, pc, Operand(-8))
-static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found in FUNCTIONS.
- static bool initialized = false;
- static uint32_t sequence[kNoCodeAgeSequenceLength];
- byte* byte_sequence = reinterpret_cast<byte*>(sequence);
- *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
- if (!initialized) {
- CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
- PredictableCodeSizeScope scope(patcher.masm(), *length);
- patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
- initialized = true;
- }
- return byte_sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = !memcmp(sequence, young_sequence, young_length);
- ASSERT(result ||
- Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
- return result;
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Address target_address = Memory::Address_at(
- sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
- patcher.masm()->add(r0, pc, Operand(-8));
- patcher.masm()->ldr(pc, MemOperand(pc, -4));
- patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
- }
-}
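-
-// Note (illustrative, not from the original source): the patched sequence
-// above is "sub r0, pc, #8" (the assembler encodes add with a negative
-// immediate as sub, matching kCodeAgePatchFirstInstruction), a load of the
-// following word into pc, and the stub's entry address emitted as data.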
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/codegen-arm.h b/src/3rdparty/v8/src/arm/codegen-arm.h
deleted file mode 100644
index 75899a9..0000000
--- a/src/3rdparty/v8/src/arm/codegen-arm.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_CODEGEN_ARM_H_
-#define V8_ARM_CODEGEN_ARM_H_
-
-#include "ast.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations
-class CompilationInfo;
-
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- CodeGenerator() {
- InitializeAstVisitor();
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Expression* type);
-
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- DwVfpRegister input,
- DwVfpRegister result,
- DwVfpRegister double_scratch1,
- DwVfpRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/constants-arm.cc b/src/3rdparty/v8/src/arm/constants-arm.cc
deleted file mode 100644
index cdca1f5..0000000
--- a/src/3rdparty/v8/src/arm/constants-arm.cc
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "constants-arm.h"
-
-
-namespace v8 {
-namespace internal {
-
-double Instruction::DoubleImmedVmov() const {
- // Reconstruct a double from the immediate encoded in the vmov instruction.
- //
- // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
- // double: [aBbbbbbb,bbcdefgh,00000000,00000000,
- // 00000000,00000000,00000000,00000000]
- //
- // where B = ~b. Only the high 16 bits are affected.
- uint64_t high16;
- high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
- high16 |= (0xff * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
- high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
- high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
-
- uint64_t imm = high16 << 48;
- double d;
- memcpy(&d, &imm, 8);
- return d;
-}
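-
-// Worked example (illustrative, not from the original source): the immediate
-// bits abcdefgh == 0111 0000 give a == 0, b == 1, cdefgh == 110000, so
-// high16 == 0011 1111 1111 0000 == 0x3ff0 and the reconstructed double is
-// 1.0.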
-
-
-// These register names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-const char* Registers::names_[kNumRegisters] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
-};
-
-
-// List of alias names which can be used when referring to ARM registers.
-const Registers::RegisterAlias Registers::aliases_[] = {
- {10, "sl"},
- {11, "r11"},
- {12, "r12"},
- {13, "r13"},
- {14, "r14"},
- {15, "r15"},
- {kNoRegister, NULL}
-};
-
-
-const char* Registers::Name(int reg) {
- const char* result;
- if ((0 <= reg) && (reg < kNumRegisters)) {
- result = names_[reg];
- } else {
- result = "noreg";
- }
- return result;
-}
-
-
-// Support for VFP registers s0 to s31 (d0 to d15) and d16 to d31.
-// Note that the register pair "sN:sN+1" (N even) aliases "dN/2", up to d15.
-// These register names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-const char* VFPRegisters::names_[kNumVFPRegisters] = {
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
- "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
- "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
- "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
- "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
- "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"
-};
-
-
-const char* VFPRegisters::Name(int reg, bool is_double) {
- ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
- return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
-}
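-
-// Example (illustrative, not from the original source): Name(1, false)
-// returns "s1", while Name(1, true) indexes past the 32 single-precision
-// names and returns "d1".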
-
-
-int VFPRegisters::Number(const char* name, bool* is_double) {
- for (int i = 0; i < kNumVFPRegisters; i++) {
- if (strcmp(names_[i], name) == 0) {
- if (i < kNumVFPSingleRegisters) {
- *is_double = false;
- return i;
- } else {
- *is_double = true;
- return i - kNumVFPSingleRegisters;
- }
- }
- }
-
- // No register with the requested name found.
- return kNoRegister;
-}
-
-
-int Registers::Number(const char* name) {
- // Look through the canonical names.
- for (int i = 0; i < kNumRegisters; i++) {
- if (strcmp(names_[i], name) == 0) {
- return i;
- }
- }
-
- // Look through the alias names.
- int i = 0;
- while (aliases_[i].reg != kNoRegister) {
- if (strcmp(aliases_[i].name, name) == 0) {
- return aliases_[i].reg;
- }
- i++;
- }
-
- // No register with the requested name found.
- return kNoRegister;
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/constants-arm.h b/src/3rdparty/v8/src/arm/constants-arm.h
deleted file mode 100644
index 841df92..0000000
--- a/src/3rdparty/v8/src/arm/constants-arm.h
+++ /dev/null
@@ -1,789 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_CONSTANTS_ARM_H_
-#define V8_ARM_CONSTANTS_ARM_H_
-
-// ARM EABI is required.
-#if defined(__arm__) && !defined(__ARM_EABI__) && !defined(_WIN32_WCE)
-#error ARM EABI support is required.
-#endif
-
-// This means that interwork-compatible jump instructions are generated. We
-// want to generate them on the simulator too, so that snapshots made there
-// can be used on real hardware.
-#if defined(__THUMB_INTERWORK__) || !defined(__arm__) || defined(_WIN32_WCE)
-# define USE_THUMB_INTERWORK 1
-#endif
-
-#if defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__) || \
- defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV7_INSTRUCTIONS 1
-#endif
-
-#if defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_6J__) || \
- defined(__ARM_ARCH_6K__) || \
- defined(__ARM_ARCH_6Z__) || \
- defined(__ARM_ARCH_6ZK__) || \
- defined(__ARM_ARCH_6T2__) || \
- defined(CAN_USE_ARMV7_INSTRUCTIONS)
-# define CAN_USE_ARMV6_INSTRUCTIONS 1
-#endif
-
-#if defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || \
- defined(__ARM_ARCH_5TEJ__) || \
- defined(CAN_USE_ARMV6_INSTRUCTIONS)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# define CAN_USE_THUMB_INSTRUCTIONS 1
-#endif
-
-// The simulator should support ARMv5 instructions and unaligned access by default.
-#if !defined(__arm__) || defined(_WIN32_WCE)
-# if !defined(_WIN32_WCE)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# endif
-# define CAN_USE_THUMB_INSTRUCTIONS 1
-
-# ifndef CAN_USE_UNALIGNED_ACCESSES
-# define CAN_USE_UNALIGNED_ACCESSES 1
-# endif
-
-#endif
-
-// Using blx may yield better code, so use it when required or when available.
-#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
-#define USE_BLX 1
-#endif
-
-namespace v8 {
-namespace internal {
-
-// Constant pool marker.
-// Use UDF, the permanently undefined instruction.
-const int kConstantPoolMarkerMask = 0xfff000f0;
-const int kConstantPoolMarker = 0xe7f000f0;
-const int kConstantPoolLengthMaxMask = 0xffff;
-inline int EncodeConstantPoolLength(int length) {
- ASSERT((length & kConstantPoolLengthMaxMask) == length);
- return ((length & 0xfff0) << 4) | (length & 0xf);
-}
-inline int DecodeConstantPoolLength(int instr) {
- ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
- return ((instr >> 4) & 0xfff0) | (instr & 0xf);
-}
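-
-// Worked example (illustrative, not from the original source):
-// EncodeConstantPoolLength(0x1234) == (0x1230 << 4) | 0x4 == 0x12304;
-// DecodeConstantPoolLength(0x12304) == 0x1230 | 0x4 == 0x1234. The length is
-// split around instruction bits 7:4, which the UDF marker pattern occupies.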
-
-// Number of registers in normal ARM mode.
-const int kNumRegisters = 16;
-
-// VFP support.
-const int kNumVFPSingleRegisters = 32;
-const int kNumVFPDoubleRegisters = 32;
-const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
-
-// PC is register 15.
-const int kPCRegister = 15;
-const int kNoRegister = -1;
-
-// -----------------------------------------------------------------------------
-// Conditions.
-
-// Defines constants and accessor classes to assemble, disassemble and
-// simulate ARM instructions.
-//
-// Section references in the code refer to the "ARM Architecture Reference
-// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf)
-//
-// Constants for specific fields are defined in their respective named enums.
-// General constants are in an anonymous enum in class Instr.
-
-// Values for the condition field as defined in section A3.2
-enum Condition {
- kNoCondition = -1,
-
- eq = 0 << 28, // Z set Equal.
- ne = 1 << 28, // Z clear Not equal.
- cs = 2 << 28, // C set Unsigned higher or same.
- cc = 3 << 28, // C clear Unsigned lower.
- mi = 4 << 28, // N set Negative.
- pl = 5 << 28, // N clear Positive or zero.
- vs = 6 << 28, // V set Overflow.
- vc = 7 << 28, // V clear No overflow.
- hi = 8 << 28, // C set, Z clear Unsigned higher.
- ls = 9 << 28, // C clear or Z set Unsigned lower or same.
- ge = 10 << 28, // N == V Greater or equal.
- lt = 11 << 28, // N != V Less than.
- gt = 12 << 28, // Z clear, N == V Greater than.
- le = 13 << 28, // Z set or N != V Less than or equal.
- al = 14 << 28, // Always.
-
- kSpecialCondition = 15 << 28, // Special condition (refer to section A3.2.1).
- kNumberOfConditions = 16,
-
- // Aliases.
- hs = cs, // C set Unsigned higher or same.
- lo = cc // C clear Unsigned lower.
-};
-
-
-inline Condition NegateCondition(Condition cond) {
- ASSERT(cond != al);
- return static_cast<Condition>(cond ^ ne);
-}
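-
-// Example (illustrative, not from the original source): since ne == 1 << 28,
-// the XOR above flips the low bit of the condition field, pairing
-// eq <-> ne, cs <-> cc, mi <-> pl, and so on. This is also why al (14) is
-// excluded: 14 ^ 1 == 15, which is kSpecialCondition rather than a
-// negatable condition.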
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cond) {
- switch (cond) {
- case lo:
- return hi;
- case hi:
- return lo;
- case hs:
- return ls;
- case ls:
- return hs;
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- default:
- return cond;
- };
-}
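-
-// Example (illustrative, not from the original source): if "cmp r0, r1"
-// followed by b(lt) tests r0 < r1, then "cmp r1, r0" followed by
-// b(ReverseCondition(lt)) == b(gt) tests the same predicate with the
-// operands transposed.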
-
-
-// -----------------------------------------------------------------------------
-// Instructions encoding.
-
-// Instr is merely used by the Assembler to distinguish 32-bit integers
-// representing instructions from usual 32-bit values.
-// Instruction objects are pointers to 32-bit values, and provide methods to
-// access the various ISA fields.
-typedef int32_t Instr;
-
-
-// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
-// as defined in section A3.4
-enum Opcode {
- AND = 0 << 21, // Logical AND.
- EOR = 1 << 21, // Logical Exclusive OR.
- SUB = 2 << 21, // Subtract.
- RSB = 3 << 21, // Reverse Subtract.
- ADD = 4 << 21, // Add.
- ADC = 5 << 21, // Add with Carry.
- SBC = 6 << 21, // Subtract with Carry.
- RSC = 7 << 21, // Reverse Subtract with Carry.
- TST = 8 << 21, // Test.
- TEQ = 9 << 21, // Test Equivalence.
- CMP = 10 << 21, // Compare.
- CMN = 11 << 21, // Compare Negated.
- ORR = 12 << 21, // Logical (inclusive) OR.
- MOV = 13 << 21, // Move.
- BIC = 14 << 21, // Bit Clear.
- MVN = 15 << 21 // Move Not.
-};
-
-
-// The bits for bit 7-4 for some type 0 miscellaneous instructions.
-enum MiscInstructionsBits74 {
- // With bits 22-21 01.
- BX = 1 << 4,
- BXJ = 2 << 4,
- BLX = 3 << 4,
- BKPT = 7 << 4,
-
- // With bits 22-21 11.
- CLZ = 1 << 4
-};
-
-
-// Instruction encoding bits and masks.
-enum {
- H = 1 << 5, // Halfword (or byte).
- S6 = 1 << 6, // Signed (or unsigned).
- L = 1 << 20, // Load (or store).
- S = 1 << 20, // Set condition code (or leave unchanged).
- W = 1 << 21, // Writeback base register (or leave unchanged).
- A = 1 << 21, // Accumulate in multiply instruction (or not).
- B = 1 << 22, // Unsigned byte (or word).
- N = 1 << 22, // Long (or short).
- U = 1 << 23, // Positive (or negative) offset/index.
- P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
- I = 1 << 25, // Immediate shifter operand (or not).
-
- B4 = 1 << 4,
- B5 = 1 << 5,
- B6 = 1 << 6,
- B7 = 1 << 7,
- B8 = 1 << 8,
- B9 = 1 << 9,
- B12 = 1 << 12,
- B16 = 1 << 16,
- B18 = 1 << 18,
- B19 = 1 << 19,
- B20 = 1 << 20,
- B21 = 1 << 21,
- B22 = 1 << 22,
- B23 = 1 << 23,
- B24 = 1 << 24,
- B25 = 1 << 25,
- B26 = 1 << 26,
- B27 = 1 << 27,
- B28 = 1 << 28,
-
- // Instruction bit masks.
- kCondMask = 15 << 28,
- kALUMask = 0x6f << 21,
- kRdMask = 15 << 12, // In str instruction.
- kCoprocessorMask = 15 << 8,
- kOpCodeMask = 15 << 21, // In data-processing instructions.
- kImm24Mask = (1 << 24) - 1,
- kOff12Mask = (1 << 12) - 1,
- kOff8Mask = (1 << 8) - 1
-};
-
-
-// -----------------------------------------------------------------------------
-// Addressing modes and instruction variants.
-
-// Condition code updating mode.
-enum SBit {
- SetCC = 1 << 20, // Set condition code.
- LeaveCC = 0 << 20 // Leave condition code unchanged.
-};
-
-
-// Status register selection.
-enum SRegister {
- CPSR = 0 << 22,
- SPSR = 1 << 22
-};
-
-
-// Shifter types for Data-processing operands as defined in section A5.1.2.
-enum ShiftOp {
- LSL = 0 << 5, // Logical shift left.
- LSR = 1 << 5, // Logical shift right.
- ASR = 2 << 5, // Arithmetic shift right.
- ROR = 3 << 5, // Rotate right.
-
- // RRX is encoded as ROR with shift_imm == 0.
- // Use a special code to make the distinction. The RRX ShiftOp is only used
- // as an argument, and will never actually be encoded. The Assembler will
- // detect it and emit the correct ROR shift operand with shift_imm == 0.
- RRX = -1,
- kNumberOfShifts = 4
-};
-
-
-// Status register fields.
-enum SRegisterField {
- CPSR_c = CPSR | 1 << 16,
- CPSR_x = CPSR | 1 << 17,
- CPSR_s = CPSR | 1 << 18,
- CPSR_f = CPSR | 1 << 19,
- SPSR_c = SPSR | 1 << 16,
- SPSR_x = SPSR | 1 << 17,
- SPSR_s = SPSR | 1 << 18,
- SPSR_f = SPSR | 1 << 19
-};
-
-// Status register field mask (or'ed SRegisterField enum values).
-typedef uint32_t SRegisterFieldMask;
-
-
-// Memory operand addressing mode.
-enum AddrMode {
- // Bit encoding P U W.
- Offset = (8|4|0) << 21, // Offset (without writeback to base).
- PreIndex = (8|4|1) << 21, // Pre-indexed addressing with writeback.
- PostIndex = (0|4|0) << 21, // Post-indexed addressing with writeback.
- NegOffset = (8|0|0) << 21, // Negative offset (without writeback to base).
- NegPreIndex = (8|0|1) << 21, // Negative pre-indexed with writeback.
- NegPostIndex = (0|0|0) << 21 // Negative post-indexed with writeback.
-};
-
-
-// Load/store multiple addressing mode.
-enum BlockAddrMode {
- // Bit encoding P U W .
- da = (0|0|0) << 21, // Decrement after.
- ia = (0|4|0) << 21, // Increment after.
- db = (8|0|0) << 21, // Decrement before.
- ib = (8|4|0) << 21, // Increment before.
- da_w = (0|0|1) << 21, // Decrement after with writeback to base.
- ia_w = (0|4|1) << 21, // Increment after with writeback to base.
- db_w = (8|0|1) << 21, // Decrement before with writeback to base.
- ib_w = (8|4|1) << 21, // Increment before with writeback to base.
-
- // Alias modes for comparison when writeback does not matter.
- da_x = (0|0|0) << 21, // Decrement after.
- ia_x = (0|4|0) << 21, // Increment after.
- db_x = (8|0|0) << 21, // Decrement before.
- ib_x = (8|4|0) << 21, // Increment before.
-
- kBlockAddrModeMask = (8|4|1) << 21
-};
-
-
-// Coprocessor load/store operand size.
-enum LFlag {
- Long = 1 << 22, // Long load/store coprocessor.
- Short = 0 << 22 // Short load/store coprocessor.
-};
-
-
-// -----------------------------------------------------------------------------
-// Supervisor Call (svc) specific support.
-
-// Special Software Interrupt codes when used in the presence of the ARM
-// simulator.
-// svc (formerly swi) provides a 24-bit immediate value. Use bits 22:0 for
-// standard SoftwareInterruptCodes. Bit 23 is reserved for the stop feature.
-enum SoftwareInterruptCodes {
- // Transition to C code.
- kCallRtRedirected = 0x10,
- // Break point.
- kBreakpoint = 0x20,
- // Stop.
- kStopCode = 1 << 23
-};
-const uint32_t kStopCodeMask = kStopCode - 1;
-const uint32_t kMaxStopCode = kStopCode - 1;
-const int32_t kDefaultStopCode = -1;
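-
-// For example, a stop with code 0x2a is encoded as an svc whose immediate
-// is (kStopCode | 0x2a); the simulator can recover the code with a mask,
-// e.g. (illustrative only):
-//
-//   uint32_t code = instr->SvcValue() & kStopCodeMask;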
-
-
-// Type of VFP register. Determines register encoding.
-enum VFPRegPrecision {
- kSinglePrecision = 0,
- kDoublePrecision = 1
-};
-
-
-// VFP FPSCR constants.
-enum VFPConversionMode {
- kFPSCRRounding = 0,
- kDefaultRoundToZero = 1
-};
-
-// This mask does not include the "inexact" or "input denormal" cumulative
-// exception flags, because we usually don't want to check for them.
-const uint32_t kVFPExceptionMask = 0xf;
-const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
-const uint32_t kVFPOverflowExceptionBit = 1 << 2;
-const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
-const uint32_t kVFPInexactExceptionBit = 1 << 4;
-const uint32_t kVFPFlushToZeroMask = 1 << 24;
-
-const uint32_t kVFPNConditionFlagBit = 1 << 31;
-const uint32_t kVFPZConditionFlagBit = 1 << 30;
-const uint32_t kVFPCConditionFlagBit = 1 << 29;
-const uint32_t kVFPVConditionFlagBit = 1 << 28;
-
-
-// VFP rounding modes. See ARM DDI 0406B Page A2-29.
-enum VFPRoundingMode {
- RN = 0 << 22, // Round to Nearest.
- RP = 1 << 22, // Round towards Plus Infinity.
- RM = 2 << 22, // Round towards Minus Infinity.
- RZ = 3 << 22, // Round towards zero.
-
- // Aliases.
- kRoundToNearest = RN,
- kRoundToPlusInf = RP,
- kRoundToMinusInf = RM,
- kRoundToZero = RZ
-};
-
-const uint32_t kVFPRoundingModeMask = 3 << 22;
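-
-// Selecting a rounding mode therefore means replacing FPSCR bits 23:22,
-// e.g. (hypothetical helper, sketching the bit manipulation only):
-//
-//   uint32_t SetRoundingBits(uint32_t fpscr, VFPRoundingMode mode) {
-//     return (fpscr & ~kVFPRoundingModeMask) | mode;
-//   }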
-
-enum CheckForInexactConversion {
- kCheckForInexactConversion,
- kDontCheckForInexactConversion
-};
-
-// -----------------------------------------------------------------------------
-// Hints.
-
-// Branch hints are not used on the ARM. They are defined so that they can
-// appear in shared function signatures, but will be ignored in ARM
-// implementations.
-enum Hint { no_hint };
-
-// Hints are not used on the ARM. Negating is trivial.
-inline Hint NegateHint(Hint ignored) { return no_hint; }
-
-
-// -----------------------------------------------------------------------------
-// Specific instructions, constants, and masks.
-// These constants are declared in assembler-arm.cc, as they use named registers
-// and other constants.
-
-
-// add(sp, sp, 4) instruction (aka Pop())
-extern const Instr kPopInstruction;
-
-// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
-// register r is not encoded.
-extern const Instr kPushRegPattern;
-
-// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
-// register r is not encoded.
-extern const Instr kPopRegPattern;
-
-// mov lr, pc
-extern const Instr kMovLrPc;
-// ldr rd, [pc, #offset]
-extern const Instr kLdrPCMask;
-extern const Instr kLdrPCPattern;
-// vldr dd, [pc, #offset]
-extern const Instr kVldrDPCMask;
-extern const Instr kVldrDPCPattern;
-// blxcc rm
-extern const Instr kBlxRegMask;
-
-extern const Instr kBlxRegPattern;
-
-extern const Instr kMovMvnMask;
-extern const Instr kMovMvnPattern;
-extern const Instr kMovMvnFlip;
-extern const Instr kMovLeaveCCMask;
-extern const Instr kMovLeaveCCPattern;
-extern const Instr kMovwMask;
-extern const Instr kMovwPattern;
-extern const Instr kMovwLeaveCCFlip;
-extern const Instr kCmpCmnMask;
-extern const Instr kCmpCmnPattern;
-extern const Instr kCmpCmnFlip;
-extern const Instr kAddSubFlip;
-extern const Instr kAndBicFlip;
-
-// A mask for the Rd register for push, pop, ldr, str instructions.
-extern const Instr kLdrRegFpOffsetPattern;
-
-extern const Instr kStrRegFpOffsetPattern;
-
-extern const Instr kLdrRegFpNegOffsetPattern;
-
-extern const Instr kStrRegFpNegOffsetPattern;
-
-extern const Instr kLdrStrInstrTypeMask;
-extern const Instr kLdrStrInstrArgumentMask;
-extern const Instr kLdrStrOffsetMask;
-
-
-// -----------------------------------------------------------------------------
-// Instruction abstraction.
-
-// The class Instruction enables access to individual fields defined in the ARM
-// architecture instruction set encoding as described in figure A3-1.
-// Note that the Assembler uses typedef int32_t Instr.
-//
-// Example: Test whether the instruction at ptr sets the condition code
-// bits.
-//
-// bool InstructionSetsConditionCodes(byte* ptr) {
-// Instruction* instr = Instruction::At(ptr);
-// int type = instr->TypeValue();
-// return ((type == 0) || (type == 1)) && instr->HasS();
-// }
-//
-class Instruction {
- public:
- enum {
- kInstrSize = 4,
- kInstrSizeLog2 = 2,
- kPCReadOffset = 8
- };
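-
-  // Note: kPCReadOffset reflects the ARM pipeline; an instruction that
-  // reads pc observes its own address plus 8.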
-
- // Helper macro to define static accessors.
-  // We use the cast to char* trick to bypass the strict-aliasing rules.
- #define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
- static inline return_type Name(Instr instr) { \
- char* temp = reinterpret_cast<char*>(&instr); \
- return reinterpret_cast<Instruction*>(temp)->Name(); \
- }
-
- #define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
-
- // Get the raw instruction bits.
- inline Instr InstructionBits() const {
- return *reinterpret_cast<const Instr*>(this);
- }
-
- // Set the raw instruction bits to value.
- inline void SetInstructionBits(Instr value) {
- *reinterpret_cast<Instr*>(this) = value;
- }
-
- // Read one particular bit out of the instruction bits.
- inline int Bit(int nr) const {
- return (InstructionBits() >> nr) & 1;
- }
-
- // Read a bit field's value out of the instruction bits.
- inline int Bits(int hi, int lo) const {
- return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
- }
-
- // Read a bit field out of the instruction bits.
- inline int BitField(int hi, int lo) const {
- return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
- }
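-
-  // For example, Bits(11, 8) evaluates to (InstructionBits() >> 8) & 0xf,
-  // since (2 << (11 - 8)) - 1 == 0xf: the four-bit field at positions
-  // 11:8 is shifted down to bit 0.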
-
- // Static support.
-
- // Read one particular bit out of the instruction bits.
- static inline int Bit(Instr instr, int nr) {
- return (instr >> nr) & 1;
- }
-
- // Read the value of a bit field out of the instruction bits.
- static inline int Bits(Instr instr, int hi, int lo) {
- return (instr >> lo) & ((2 << (hi - lo)) - 1);
- }
-
-
- // Read a bit field out of the instruction bits.
- static inline int BitField(Instr instr, int hi, int lo) {
- return instr & (((2 << (hi - lo)) - 1) << lo);
- }
-
-
-  // Accessors for the different named fields used in the ARM encoding.
-  // The naming of these accessors corresponds to figure A3-1.
-  //
-  // Two kinds of accessors are declared:
-  // - <Name>Field() will return the raw field, i.e. the field's bits at their
-  //   original place in the instruction encoding.
-  //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
-  //   0xC0810002, ConditionField(instr) will return 0xC0000000.
-  // - <Name>Value() will return the field value, shifted back to bit 0.
-  //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
-  //   0xC0810002, ConditionValue(instr) will return 0xC.
-
-
- // Generally applicable fields
- inline Condition ConditionValue() const {
- return static_cast<Condition>(Bits(31, 28));
- }
- inline Condition ConditionField() const {
- return static_cast<Condition>(BitField(31, 28));
- }
- DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue);
- DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
-
- inline int TypeValue() const { return Bits(27, 25); }
-
- inline int RnValue() const { return Bits(19, 16); }
- DECLARE_STATIC_ACCESSOR(RnValue);
- inline int RdValue() const { return Bits(15, 12); }
- DECLARE_STATIC_ACCESSOR(RdValue);
-
- inline int CoprocessorValue() const { return Bits(11, 8); }
- // Support for VFP.
- // Vn(19-16) | Vd(15-12) | Vm(3-0)
- inline int VnValue() const { return Bits(19, 16); }
- inline int VmValue() const { return Bits(3, 0); }
- inline int VdValue() const { return Bits(15, 12); }
- inline int NValue() const { return Bit(7); }
- inline int MValue() const { return Bit(5); }
- inline int DValue() const { return Bit(22); }
- inline int RtValue() const { return Bits(15, 12); }
- inline int PValue() const { return Bit(24); }
- inline int UValue() const { return Bit(23); }
- inline int Opc1Value() const { return (Bit(23) << 2) | Bits(21, 20); }
- inline int Opc2Value() const { return Bits(19, 16); }
- inline int Opc3Value() const { return Bits(7, 6); }
- inline int SzValue() const { return Bit(8); }
- inline int VLValue() const { return Bit(20); }
- inline int VCValue() const { return Bit(8); }
- inline int VAValue() const { return Bits(23, 21); }
- inline int VBValue() const { return Bits(6, 5); }
- inline int VFPNRegValue(VFPRegPrecision pre) {
- return VFPGlueRegValue(pre, 16, 7);
- }
- inline int VFPMRegValue(VFPRegPrecision pre) {
- return VFPGlueRegValue(pre, 0, 5);
- }
- inline int VFPDRegValue(VFPRegPrecision pre) {
- return VFPGlueRegValue(pre, 12, 22);
- }
-
- // Fields used in Data processing instructions
- inline int OpcodeValue() const {
- return static_cast<Opcode>(Bits(24, 21));
- }
- inline Opcode OpcodeField() const {
- return static_cast<Opcode>(BitField(24, 21));
- }
- inline int SValue() const { return Bit(20); }
- // with register
- inline int RmValue() const { return Bits(3, 0); }
- DECLARE_STATIC_ACCESSOR(RmValue);
- inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
- inline ShiftOp ShiftField() const {
- return static_cast<ShiftOp>(BitField(6, 5));
- }
- inline int RegShiftValue() const { return Bit(4); }
- inline int RsValue() const { return Bits(11, 8); }
- inline int ShiftAmountValue() const { return Bits(11, 7); }
- // with immediate
- inline int RotateValue() const { return Bits(11, 8); }
- inline int Immed8Value() const { return Bits(7, 0); }
- inline int Immed4Value() const { return Bits(19, 16); }
-  inline int ImmedMovwMovtValue() const {
-    return Immed4Value() << 12 | Offset12Value();
-  }
-
- // Fields used in Load/Store instructions
- inline int PUValue() const { return Bits(24, 23); }
- inline int PUField() const { return BitField(24, 23); }
- inline int BValue() const { return Bit(22); }
- inline int WValue() const { return Bit(21); }
- inline int LValue() const { return Bit(20); }
- // with register uses same fields as Data processing instructions above
- // with immediate
- inline int Offset12Value() const { return Bits(11, 0); }
- // multiple
- inline int RlistValue() const { return Bits(15, 0); }
- // extra loads and stores
- inline int SignValue() const { return Bit(6); }
- inline int HValue() const { return Bit(5); }
- inline int ImmedHValue() const { return Bits(11, 8); }
- inline int ImmedLValue() const { return Bits(3, 0); }
-
- // Fields used in Branch instructions
- inline int LinkValue() const { return Bit(24); }
- inline int SImmed24Value() const { return ((InstructionBits() << 8) >> 8); }
-
- // Fields used in Software interrupt instructions
- inline SoftwareInterruptCodes SvcValue() const {
- return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
- }
-
- // Test for special encodings of type 0 instructions (extra loads and stores,
- // as well as multiplications).
- inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); }
-
- // Test for miscellaneous instructions encodings of type 0 instructions.
-  inline bool IsMiscType0() const { return (Bit(24) == 1)
-                                           && (Bit(23) == 0)
-                                           && (Bit(20) == 0)
-                                           && (Bit(7) == 0); }
-
- // Test for a nop instruction, which falls under type 1.
- inline bool IsNopType1() const { return Bits(24, 0) == 0x0120F000; }
-
- // Test for a stop instruction.
- inline bool IsStop() const {
- return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
- }
-
- // Special accessors that test for existence of a value.
- inline bool HasS() const { return SValue() == 1; }
- inline bool HasB() const { return BValue() == 1; }
- inline bool HasW() const { return WValue() == 1; }
- inline bool HasL() const { return LValue() == 1; }
- inline bool HasU() const { return UValue() == 1; }
- inline bool HasSign() const { return SignValue() == 1; }
- inline bool HasH() const { return HValue() == 1; }
- inline bool HasLink() const { return LinkValue() == 1; }
-
- // Decoding the double immediate in the vmov instruction.
- double DoubleImmedVmov() const;
-
-  // Instructions are read out of a code stream. The only way to get a
-  // reference to an instruction is to convert a pointer. There is no way
-  // to allocate or create instances of class Instruction.
-  // Use the At(pc) function to create references to Instruction.
- static Instruction* At(byte* pc) {
- return reinterpret_cast<Instruction*>(pc);
- }
-
-
- private:
-  // Join split register codes, depending on single or double precision.
-  // four_bit is the position of the least-significant bit of the four-bit
-  // specifier. one_bit is the position of the additional single-bit
-  // specifier.
- inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
- if (pre == kSinglePrecision) {
- return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
- }
- return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
- }
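-
-  // For example, the single-precision register s5 is stored as 5 >> 1 == 2
-  // in the four-bit field and 5 & 1 == 1 in the extra bit, so gluing
-  // yields (2 << 1) | 1 == 5 again.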
-
- // We need to prevent the creation of instances of class Instruction.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
-};
-
-
-// Helper functions for converting between register numbers and names.
-class Registers {
- public:
- // Return the name of the register.
- static const char* Name(int reg);
-
- // Lookup the register number for the name provided.
- static int Number(const char* name);
-
- struct RegisterAlias {
- int reg;
- const char* name;
- };
-
- private:
- static const char* names_[kNumRegisters];
- static const RegisterAlias aliases_[];
-};
-
-// Helper functions for converting between VFP register numbers and names.
-class VFPRegisters {
- public:
- // Return the name of the register.
- static const char* Name(int reg, bool is_double);
-
- // Lookup the register number for the name provided.
-  // Set the flag pointed to by is_double to true if the register
-  // is double-precision.
- static int Number(const char* name, bool* is_double);
-
- private:
- static const char* names_[kNumVFPRegisters];
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_CONSTANTS_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/cpu-arm.cc b/src/3rdparty/v8/src/arm/cpu-arm.cc
deleted file mode 100644
index bed9503..0000000
--- a/src/3rdparty/v8/src/arm/cpu-arm.cc
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// CPU specific code for ARM, independent of the OS, goes here.
-
-#include "v8.h"
-
-#if defined(__arm__) && !defined(_WIN32_WCE)
- #if !defined(__QNXNTO__)
- #include <sys/syscall.h> // for cache flushing.
- #else
- #include <sys/mman.h> // for cache flushing.
- #endif
-#endif
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "cpu.h"
-#include "macro-assembler.h"
-#include "simulator.h" // for cache flushing.
-
-namespace v8 {
-namespace internal {
-
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return CpuFeatures::IsSupported(VFP3);
-}
-
-
-void CPU::FlushICache(void* start, size_t size) {
-  // Nothing to do when flushing zero instructions.
- if (size == 0) {
- return;
- }
-
-#if defined (USE_SIMULATOR)
- // Not generating ARM instructions for C-code. This means that we are
- // building an ARM emulator based target. We should notify the simulator
- // that the Icache was flushed.
- // None of this code ends up in the snapshot so there are no issues
- // around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
-#elif defined(__QNXNTO__)
-  // The QNX kernel does not expose the symbol __ARM_NR_cacheflush, so we
-  // use the msync system call instead of the approach used on Linux.
- msync(start, size, MS_SYNC|MS_INVALIDATE_ICACHE);
-#elif defined(_WIN32_WCE)
-  // The Windows CE compiler does not support the asm keyword, nor does it
-  // expose __ARM_NR_cacheflush. Windows CE also cannot flush a single
-  // region, so we flush the whole process instead.
- FlushInstructionCache(GetCurrentProcess(), NULL, NULL);
-#else
- // Ideally, we would call
- // syscall(__ARM_NR_cacheflush, start,
- // reinterpret_cast<intptr_t>(start) + size, 0);
- // however, syscall(int, ...) is not supported on all platforms, especially
- // not when using EABI, so we call the __ARM_NR_cacheflush syscall directly.
-
- register uint32_t beg asm("a1") = reinterpret_cast<uint32_t>(start);
- register uint32_t end asm("a2") =
- reinterpret_cast<uint32_t>(start) + size;
- register uint32_t flg asm("a3") = 0;
- #if defined (__arm__) && !defined(__thumb__)
- // __arm__ may be defined in thumb mode.
- register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
- asm volatile(
- "svc 0x0"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (scno));
- #else
- // r7 is reserved by the EABI in thumb mode.
- asm volatile(
- "@ Enter ARM Mode \n\t"
- "adr r3, 1f \n\t"
- "bx r3 \n\t"
- ".ALIGN 4 \n\t"
- ".ARM \n"
- "1: push {r7} \n\t"
- "mov r7, %4 \n\t"
- "svc 0x0 \n\t"
- "pop {r7} \n\t"
- "@ Enter THUMB Mode\n\t"
- "adr r3, 2f+1 \n\t"
- "bx r3 \n\t"
- ".THUMB \n"
- "2: \n\t"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
- : "r3");
- #endif
-#endif
-}
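-
-// Note that any code patching must be followed by a FlushICache call over
-// the patched range; helpers such as CodePatcher take care of this when
-// they go out of scope, e.g. (illustrative call only):
-//
-//   CPU::FlushICache(patch_start, patch_size_in_bytes);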
-
-
-void CPU::DebugBreak() {
-#if !defined (__arm__) || !defined(CAN_USE_ARMV5_INSTRUCTIONS)
- UNIMPLEMENTED(); // when building ARM emulator target
-#else
- asm volatile("bkpt 0");
-#endif
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/debug-arm.cc b/src/3rdparty/v8/src/arm/debug-arm.cc
deleted file mode 100644
index e9a65b2..0000000
--- a/src/3rdparty/v8/src/arm/debug-arm.cc
+++ /dev/null
@@ -1,345 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen.h"
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- // Patch the code changing the return from JS function sequence from
- // mov sp, fp
- // ldmia sp!, {fp, lr}
- // add sp, sp, #4
- // bx lr
- // to a call to the debug break return code.
- // #ifdef USE_BLX
- // ldr ip, [pc, #0]
- // blx ip
- // #else
- // mov lr, pc
- // ldr pc, [pc, #-4]
- // #endif
- // <debug break return code entry point address>
-  //   bkpt 0
- CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
-#ifdef USE_BLX
- patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
- patcher.masm()->blx(v8::internal::ip);
-#else
- patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
- patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
-#endif
- patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
- patcher.masm()->bkpt(0);
-}
-
-
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceInstructions);
-}
-
-
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Patch the code changing the debug break slot code from
- // mov r2, r2
- // mov r2, r2
- // mov r2, r2
- // to a call to the debug break slot code.
- // #ifdef USE_BLX
- // ldr ip, [pc, #0]
- // blx ip
- // #else
- // mov lr, pc
- // ldr pc, [pc, #-4]
- // #endif
- // <debug break slot code entry point address>
- CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
-#ifdef USE_BLX
- patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
- patcher.masm()->blx(v8::internal::ip);
-#else
- patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
- patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
-#endif
- patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kDebugBreakSlotInstructions);
-}
-
-const bool Debug::FramePaddingLayout::kIsSupported = false;
-
-
-#define __ ACCESS_MASM(masm)
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Store the registers containing live values on the expression stack to
-    // make sure that these are correctly updated during GC. Non-object values
-    // are stored as smis so that they are left untouched by the GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- if ((object_regs | non_object_regs) != 0) {
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ tst(reg, Operand(0xc0000000));
- __ Assert(eq, "Unable to encode value as smi");
- }
- __ mov(reg, Operand(reg, LSL, kSmiTagSize));
- }
- }
- __ stm(db_w, sp, object_regs | non_object_regs);
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ mov(r0, Operand::Zero()); // no arguments
- __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- if ((object_regs | non_object_regs) != 0) {
- __ ldm(ia_w, sp, object_regs | non_object_regs);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((non_object_regs & (1 << r)) != 0) {
- __ mov(reg, Operand(reg, LSR, kSmiTagSize));
- }
- if (FLAG_debug_code &&
-            (((object_regs | non_object_regs) & (1 << r)) == 0)) {
- __ mov(reg, Operand(kDebugZapValue));
- }
- }
- }
-
- // Leave the internal frame.
- }
-
-  // Now that the break point has been handled, resume normal execution by
-  // jumping to the target address the caller intended, which was
-  // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
- __ mov(ip, Operand(after_break_target));
- __ ldr(ip, MemOperand(ip));
- __ Jump(ip);
-}
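-
-// A RegList is simply a bit set of register codes, so a call such as
-// Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit(), 0) (the IC
-// load case below) preserves r0 and r2 as tagged objects across the
-// runtime call.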
-
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC load (from ic-arm.cc).
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- [sp] : receiver
- // -----------------------------------
- // Registers r0 and r2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC store (from ic-arm.cc).
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- // Registers r0, r1, and r2 contain objects that need to be pushed on the
- // expression stack of the fake JS frame.
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit(), 0);
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Calling convention for IC call (from ic-arm.cc)
- // ----------- S t a t e -------------
- // -- r2 : name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r2.bit(), 0);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
-  // In places other than IC call sites it is expected that r0 holds TOS,
-  // which is an object. This is not generally the case, so this helper
-  // should be used with care.
- Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
-}
-
-
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-arm.cc).
- // ----------- S t a t e -------------
- // -- r1 : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit(), 0);
-}
-
-
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-arm.cc).
- // ----------- S t a t e -------------
- // -- r1 : function
- // -- r2 : cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-arm.cc)
- // ----------- S t a t e -------------
- // -- r0 : number of arguments (not smi)
- // -- r1 : constructor function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
-}
-
-
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
- // Calling convention for CallConstructStub (from code-stubs-arm.cc)
- // ----------- S t a t e -------------
- // -- r0 : number of arguments (not smi)
- // -- r1 : constructor function
- // -- r2 : cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
-}
-
-
-void Debug::GenerateSlot(MacroAssembler* masm) {
-  // Generate enough nops to make room for a call instruction. Avoid
-  // emitting the constant pool in the debug break slot code.
- Assembler::BlockConstPoolScope block_const_pool(masm);
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
- __ nop(MacroAssembler::DEBUG_BREAK_NOP);
- }
- ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
- masm->InstructionsGeneratedSince(&check_codesize));
-}
-
-
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
-  // In the places where a debug break slot is inserted, no registers can
-  // contain object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0);
-}
-
-
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on arm");
-}
-
-
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on arm");
-}
-
-const bool Debug::kFrameDropperSupported = false;
-
-#undef __
-
-
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/deoptimizer-arm.cc b/src/3rdparty/v8/src/arm/deoptimizer-arm.cc
deleted file mode 100644
index 2e1e3e3..0000000
--- a/src/3rdparty/v8/src/arm/deoptimizer-arm.cc
+++ /dev/null
@@ -1,1106 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-const int Deoptimizer::table_entry_size_ = 16;
-
-
-int Deoptimizer::patch_size() {
- const int kCallInstructionSizeInWords = 3;
- return kCallInstructionSizeInWords * Assembler::kInstrSize;
-}
-
-
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction* function) {
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
-
- ASSERT(function->IsOptimized());
- ASSERT(function->FunctionsInFunctionListShareSameCode());
-
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
- // Get the optimized code.
- Code* code = function->code();
- Address code_start_address = code->instruction_start();
-
-  // Invalidate the relocation information, as the code patching below will
-  // make it invalid, and it is not needed any more.
- code->InvalidateRelocation();
-
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
-#ifdef DEBUG
- Address prev_call_address = NULL;
-#endif
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
- Address call_address = code_start_address + deopt_data->Pc(i)->value();
- Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
-    // We need calls to have a predictable size in the unoptimized code, but
-    // this is optimized code, so the size does not have to be predictable here.
- int call_size_in_bytes =
- MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
- RelocInfo::NONE32);
- int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
- ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
- ASSERT(call_size_in_bytes <= patch_size());
- CodePatcher patcher(call_address, call_size_in_words);
- patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
- ASSERT(prev_call_address == NULL ||
- call_address >= prev_call_address + patch_size());
- ASSERT(call_address + patch_size() <= code->instruction_end());
-#ifdef DEBUG
- prev_call_address = call_address;
-#endif
- }
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
- }
-}
-
-
-static const int32_t kBranchBeforeInterrupt = 0x5a000004;
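-
-// 0x5a000004 decodes as 'bpl <pc + 24>': condition field 0x5 (pl), the
-// branch opcode, and a 24-bit word offset of 4, i.e. 16 bytes past the
-// usual pc + 8 read point.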
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- // The back edge bookkeeping code matches the pattern:
- //
- // <decrement profiling counter>
- // 2a 00 00 01 bpl ok
- // e5 9f c? ?? ldr ip, [pc, <stack guard address>]
- // e1 2f ff 3c blx ip
- ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
-
- // We patch the code to the following form:
- //
- // <decrement profiling counter>
- // e1 a0 00 00 mov r0, r0 (NOP)
- // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
- // e1 2f ff 3c blx ip
- // and overwrite the constant containing the
- // address of the stack check stub.
-
- // Replace conditional jump with NOP.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->nop();
-
- // Replace the stack check address in the constant pool
- // with the entry address of the replacement code.
- uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address stack_check_address_pointer = pc_after + stack_check_address_offset;
- ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
- reinterpret_cast<uint32_t>(check_code->entry()));
- Memory::uint32_at(stack_check_address_pointer) =
- reinterpret_cast<uint32_t>(replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
-
- // Replace NOP with conditional jump.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->b(+16, pl);
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
-
- // Replace the stack check address in the constant pool
- // with the entry address of the replacement code.
- uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address stack_check_address_pointer = pc_after + stack_check_address_offset;
- ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
- Memory::uint32_at(stack_check_address_pointer) =
- reinterpret_cast<uint32_t>(check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, check_code);
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
-
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
- output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
-
-
-void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
- int frame_index) {
- //
- // FROM TO
- // | .... | | .... |
- // +-------------------------+ +-------------------------+
- // | JSFunction continuation | | JSFunction continuation |
- // +-------------------------+ +-------------------------+
- // | | saved frame (fp) | | saved frame (fp) |
- // | +=========================+<-fp +=========================+<-fp
- // | | JSFunction context | | JSFunction context |
-  //  v  +-------------------------+          +-------------------------+
- // | COMPILED_STUB marker | | STUB_FAILURE marker |
- // +-------------------------+ +-------------------------+
- // | | | caller args.arguments_ |
- // | ... | +-------------------------+
- // | | | caller args.length_ |
- // |-------------------------|<-sp +-------------------------+
- // | caller args pointer |
- // +-------------------------+
- // | caller stack param 1 |
- // parameters in registers +-------------------------+
- // and spilled to stack | .... |
- // +-------------------------+
- // | caller stack param n |
- // +-------------------------+<-sp
- // r0 = number of parameters
- // r1 = failure handler address
- // fp = saved frame
- // cp = JSFunction context
- //
-
- ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
- int major_key = compiled_code_->major_key();
- CodeStubInterfaceDescriptor* descriptor =
- isolate_->code_stub_interface_descriptor(major_key);
-
- // The output frame must have room for all pushed register parameters
- // and the standard stack frame slots. Include space for an argument
- // object to the callee and optionally the space to pass the argument
- // object to the stub failure handler.
- int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
- sizeof(Arguments) + kPointerSize;
- int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
- int input_frame_size = input_->GetFrameSize();
- int output_frame_size = height_in_bytes + fixed_frame_size;
- if (trace_) {
- PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
- CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
- height_in_bytes);
- }
-
- // The stub failure trampoline is a single frame.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, NULL);
- output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ASSERT(frame_index == 0);
- output_[frame_index] = output_frame;
-
- // The top address for the output frame can be computed from the input
- // frame pointer and the output frame's height. Subtract space for the
- // context and function slots.
- intptr_t top_address = input_->GetRegister(fp.code()) - (2 * kPointerSize) -
- height_in_bytes;
- output_frame->SetTop(top_address);
-
- // Read caller's PC (JSFunction continuation) from the input frame.
- intptr_t input_frame_offset = input_frame_size - kPointerSize;
- intptr_t output_frame_offset = output_frame_size - kPointerSize;
- intptr_t value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Read caller's FP from the input frame, and set this frame's FP.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- intptr_t frame_ptr = input_->GetRegister(fp.code());
- output_frame->SetRegister(fp.code(), frame_ptr);
- output_frame->SetFp(frame_ptr);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
-  // The context can be read from the input frame.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetRegister(cp.code(), value);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_frame_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (stub fail sentinel)\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- int caller_arg_count = 0;
- if (descriptor->stack_parameter_count_ != NULL) {
- caller_arg_count =
- input_->GetRegister(descriptor->stack_parameter_count_->code());
- }
-
- // Build the Arguments object for the caller's parameters and a pointer to it.
- output_frame_offset -= kPointerSize;
- value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
- (caller_arg_count - 1) * kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.arguments\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = caller_arg_count;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.length\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = frame_ptr - (output_frame_size - output_frame_offset) -
- StandardFrameConstants::kMarkerOffset + kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args*\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Copy the register parameters to the failure frame.
- for (int i = 0; i < descriptor->register_param_count_; ++i) {
- output_frame_offset -= kPointerSize;
- DoTranslateCommand(iterator, 0, output_frame_offset);
- }
-
- ASSERT(0 == output_frame_offset);
-
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-
- ApiFunction function(descriptor->deoptimization_handler_);
- ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
- intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
- output_frame->SetRegister(r0.code(), params);
- output_frame->SetRegister(r1.code(), handler);
-
- // Compute this frame's PC, state, and continuation.
- Code* trampoline = NULL;
- int extra = descriptor->extra_expression_stack_count_;
- StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
- ASSERT(trampoline != NULL);
- output_frame->SetPc(reinterpret_cast<intptr_t>(
- trampoline->instruction_start()));
- output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
- Code* notify_failure =
- isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(notify_failure->entry()));
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 8 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
-  // The construct stub cannot be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
-  // The context can be read from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // Constructor function being invoked by the stub.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- uint32_t pc = reinterpret_cast<uint32_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
-
-
-// This code is very similar to ia32 code, but relies on register names (fp, sp)
-// and how the frame is laid out.
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
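-  // For reference (an illustrative summary of the slot writes below): from
-  // top (low address) to bottom (high address) the frame will hold the
-  // expression stack/locals, the function, the context, the caller's fp,
-  // the caller's pc, and the incoming parameters.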
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) {
- output_frame->SetRegister(fp.code(), fp_value);
- }
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
-  // For the bottommost output frame the context can be read from the input
-  // frame. For all subsequent output frames it can be taken from the function
-  // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(cp.code(), value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
- if (is_topmost) {
- output_frame->SetRegister(pc.code(), pc_value);
- }
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
-}
-
-
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers fp and sp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
- }
-}
-
-
-#define __ masm()->
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Deoptimizer::EntryGenerator::Generate() {
- GeneratePrologue();
-
- Isolate* isolate = masm()->isolate();
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- // Everything but pc, lr and ip which will be saved but not restored.
- RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
-
- const int kDoubleRegsSize =
- kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Save all allocatable VFP registers before messing with them.
- ASSERT(kDoubleRegZero.code() == 14);
- ASSERT(kScratchDoubleReg.code() == 15);
-
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(ip);
-
- // Push registers d0-d13, and possibly d16-d31, on the stack.
- // If d16-d31 are not pushed, decrease the stack pointer instead.
- __ vstm(db_w, sp, d16, d31, ne);
- __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
- __ vstm(db_w, sp, d0, d13);
- } else {
- __ sub(sp, sp, Operand(kDoubleRegsSize));
- }
-
- // Push all 16 registers (needed to populate FrameDescription::registers_).
- // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
- // handle this a bit differently.
- __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
-
- const int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-
- // Get the bailout id from the stack.
- __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
-
-  // Get the address of the location in the code object (r3) if possible (the
-  // return address for lazy deoptimization) and compute the fp-to-sp delta in
-  // register r4.
- if (type() == EAGER) {
- __ mov(r3, Operand::Zero());
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else if (type() == OSR) {
- __ mov(r3, lr);
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ mov(r3, lr);
- // Correct two words for bailout id and return address.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
- __ sub(r4, fp, r4);
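-  // r4 now holds fp minus the frame's sp as it was before this entry code
-  // pushed anything, i.e. the fp-to-sp delta of the optimized frame
-  // (illustrative restatement of the computation above).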
-
- // Allocate a new deoptimizer object.
- // Pass four arguments in r0 to r3 and fifth argument on stack.
- __ PrepareCallCFunction(6, r5);
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(r1, Operand(type())); // bailout type,
- // r2: bailout id already loaded.
- // r3: code address or 0 already loaded.
- __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
- __ mov(r5, Operand(ExternalReference::isolate_address()));
- __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
- // Call Deoptimizer::New().
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
- }
-
- // Preserve "deoptimizer" object in register r0 and get the input
- // frame descriptor pointer to r1 (deoptimizer->input_);
- __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
-
- // Copy core registers into FrameDescription::registers_[kNumRegisters].
- ASSERT(Register::kNumRegisters == kNumberOfRegisters);
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ ldr(r2, MemOperand(sp, i * kPointerSize));
- __ str(r2, MemOperand(r1, offset));
- }
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Copy VFP registers to
- // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ vldr(d0, sp, src_offset);
- __ vstr(d0, r1, dst_offset);
- }
- }
-
-  // Remove the bailout id, the return address (if any), and the saved
-  // registers from the stack.
- if (type() == EAGER || type() == OSR) {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
-
- // Compute a pointer to the unwinding limit in register r2; that is
- // the first stack slot not part of the input frame.
- __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
- __ add(r2, r2, sp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
- Label pop_loop;
- Label pop_loop_header;
- __ b(&pop_loop_header);
- __ bind(&pop_loop);
- __ pop(r4);
- __ str(r4, MemOperand(r3, 0));
- __ add(r3, r3, Operand(sizeof(uint32_t)));
- __ bind(&pop_loop_header);
- __ cmp(r2, sp);
- __ b(ne, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(r0); // Preserve deoptimizer object across call.
- // r0: deoptimizer object; r1: scratch.
- __ PrepareCallCFunction(1, r1);
- // Call Deoptimizer::ComputeOutputFrames().
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
- }
- __ pop(r0); // Restore deoptimizer object (class Deoptimizer).
-
- // Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
- // Outer loop state: r4 = current "FrameDescription** output_",
- // r1 = one past the last FrameDescription**.
- __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
- __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
- __ add(r1, r4, Operand(r1, LSL, 2));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
- __ ldr(r2, MemOperand(r4, 0)); // output_[ix]
- __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ sub(r3, r3, Operand(sizeof(uint32_t)));
- __ add(r6, r2, Operand(r3));
- __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
- __ push(r7);
- __ bind(&inner_loop_header);
- __ cmp(r3, Operand::Zero());
- __ b(ne, &inner_push_loop); // test for gt?
- __ add(r4, r4, Operand(kPointerSize));
- __ bind(&outer_loop_header);
- __ cmp(r4, r1);
- __ b(lt, &outer_push_loop);
-
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(ip);
-
- __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
- int src_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
- if (i == kDoubleRegZero.code()) continue;
- if (i == kScratchDoubleReg.code()) continue;
-
- const DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
- src_offset += kDoubleSize;
- }
- }
-
- // Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
- __ push(r6);
- }
-
- __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
- __ push(r6);
- __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
- __ push(r6);
-
- // Push the registers from the last output frame.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ ldr(r6, MemOperand(r2, offset));
- __ push(r6);
- }
-
- // Restore the registers from the stack.
-  __ ldm(ia_w, sp, restored_regs);  // all registers but sp, lr and pc.
- __ pop(ip); // remove sp
- __ pop(ip); // remove lr
-
- __ InitializeRootRegister();
-
- __ pop(ip); // remove pc
-  __ pop(r7);  // get continuation
-  __ pop(lr);  // get the output frame's pc into lr
- __ Jump(r7);
- __ stop("Unreachable.");
-}
-
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries. Note that any
-  // registers may still be live.
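-  // Schematically, each emitted entry is (illustrative; for EAGER a nop
-  // replaces the push of lr):
-  //   push {lr}
-  //   mov ip, #i
-  //   push {ip}
-  //   b done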
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- if (type() == EAGER) {
- __ nop();
- } else {
- // Emulate ia32 like call by pushing return address to stack.
- __ push(lr);
- }
- __ mov(ip, Operand(i));
- __ push(ip);
- __ b(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
-}
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/arm/disasm-arm.cc b/src/3rdparty/v8/src/arm/disasm-arm.cc
deleted file mode 100644
index dec62b3..0000000
--- a/src/3rdparty/v8/src/arm/disasm-arm.cc
+++ /dev/null
@@ -1,1572 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A Disassembler object is used to disassemble a block of code instruction by
-// instruction. The default implementation of the NameConverter object can be
-// overridden to modify register names or to do symbol lookup on addresses.
-//
-// The example below will disassemble a block of code and print it to stdout.
-//
-// NameConverter converter;
-// Disassembler d(converter);
-// for (byte* pc = begin; pc < end;) {
-// v8::internal::EmbeddedVector<char, 256> buffer;
-// byte* prev_pc = pc;
-// pc += d.InstructionDecode(buffer, pc);
-// printf("%p %08x %s\n",
-// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
-// }
-//
-// The Disassembler class also has a convenience method to disassemble a block
-// of code into a FILE*, meaning that the above functionality could also be
-// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
-
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-#ifndef WIN32
-#include <stdint.h>
-#endif
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "constants-arm.h"
-#include "disasm.h"
-#include "macro-assembler.h"
-#include "platform.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-//------------------------------------------------------------------------------
-
-// Decoder decodes and disassembles instructions into an output buffer.
-// It uses the converter to convert register names and call destinations into
-// more informative description.
-class Decoder {
- public:
- Decoder(const disasm::NameConverter& converter,
- Vector<char> out_buffer)
- : converter_(converter),
- out_buffer_(out_buffer),
- out_buffer_pos_(0) {
- out_buffer_[out_buffer_pos_] = '\0';
- }
-
- ~Decoder() {}
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(byte* instruction);
-
- static bool IsConstantPoolAt(byte* instr_ptr);
- static int ConstantPoolSizeAt(byte* instr_ptr);
-
- private:
- // Bottleneck functions to print into the out_buffer.
- void PrintChar(const char ch);
- void Print(const char* str);
-
- // Printing of common values.
- void PrintRegister(int reg);
- void PrintSRegister(int reg);
- void PrintDRegister(int reg);
- int FormatVFPRegister(Instruction* instr, const char* format);
- void PrintMovwMovt(Instruction* instr);
- int FormatVFPinstruction(Instruction* instr, const char* format);
- void PrintCondition(Instruction* instr);
- void PrintShiftRm(Instruction* instr);
- void PrintShiftImm(Instruction* instr);
- void PrintShiftSat(Instruction* instr);
- void PrintPU(Instruction* instr);
- void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
-
- // Handle formatting of instructions and their options.
- int FormatRegister(Instruction* instr, const char* option);
- int FormatOption(Instruction* instr, const char* option);
- void Format(Instruction* instr, const char* format);
- void Unknown(Instruction* instr);
-
- // Each of these functions decodes one particular instruction type, a 3-bit
- // field in the instruction encoding.
- // Types 0 and 1 are combined as they are largely the same except for the way
- // they interpret the shifter operand.
- void DecodeType01(Instruction* instr);
- void DecodeType2(Instruction* instr);
- void DecodeType3(Instruction* instr);
- void DecodeType4(Instruction* instr);
- void DecodeType5(Instruction* instr);
- void DecodeType6(Instruction* instr);
- // Type 7 includes special Debugger instructions.
- int DecodeType7(Instruction* instr);
- // For VFP support.
- void DecodeTypeVFP(Instruction* instr);
- void DecodeType6CoprocessorIns(Instruction* instr);
-
- void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
- void DecodeVCMP(Instruction* instr);
- void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
- void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
-
- const disasm::NameConverter& converter_;
- Vector<char> out_buffer_;
- int out_buffer_pos_;
-
- DISALLOW_COPY_AND_ASSIGN(Decoder);
-};
-
-
-// Support for assertions in the Decoder formatting functions.
-#define STRING_STARTS_WITH(string, compare_string) \
- (strncmp(string, compare_string, strlen(compare_string)) == 0)
-
-
-// Append the ch to the output buffer.
-void Decoder::PrintChar(const char ch) {
- out_buffer_[out_buffer_pos_++] = ch;
-}
-
-
-// Append the str to the output buffer.
-void Decoder::Print(const char* str) {
- char cur = *str++;
- while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
- PrintChar(cur);
- cur = *str++;
- }
- out_buffer_[out_buffer_pos_] = 0;
-}
-
-
-// These condition names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-static const char* cond_names[kNumberOfConditions] = {
- "eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
- "hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
-};
-
-
-// Print the condition guarding the instruction.
-void Decoder::PrintCondition(Instruction* instr) {
- Print(cond_names[instr->ConditionValue()]);
-}
-
-
-// Print the register name according to the active name converter.
-void Decoder::PrintRegister(int reg) {
- Print(converter_.NameOfCPURegister(reg));
-}
-
-// Print the VFP S register name according to the active name converter.
-void Decoder::PrintSRegister(int reg) {
- Print(VFPRegisters::Name(reg, false));
-}
-
-// Print the VFP D register name according to the active name converter.
-void Decoder::PrintDRegister(int reg) {
- Print(VFPRegisters::Name(reg, true));
-}
-
-
-// These shift names are defined in a way to match the native disassembler
-// formatting. See for example the command "objdump -d <binary file>".
-static const char* const shift_names[kNumberOfShifts] = {
- "lsl", "lsr", "asr", "ror"
-};
-
-
-// Print the register shift operands for the instruction. Generally used for
-// data processing instructions.
-void Decoder::PrintShiftRm(Instruction* instr) {
- ShiftOp shift = instr->ShiftField();
- int shift_index = instr->ShiftValue();
- int shift_amount = instr->ShiftAmountValue();
- int rm = instr->RmValue();
-
- PrintRegister(rm);
-
- if ((instr->RegShiftValue() == 0) && (shift == LSL) && (shift_amount == 0)) {
- // Special case for using rm only.
- return;
- }
- if (instr->RegShiftValue() == 0) {
- // by immediate
- if ((shift == ROR) && (shift_amount == 0)) {
- Print(", RRX");
- return;
- } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
- shift_amount = 32;
- }
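-    // For example (illustrative): Rm = r1 with LSR and an encoded amount of
-    // 0 prints as "r1, lsr #32" below.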
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[shift_index],
- shift_amount);
- } else {
- // by register
- int rs = instr->RsValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s ", shift_names[shift_index]);
- PrintRegister(rs);
- }
-}
-
-
-// Print the immediate operand for the instruction. Generally used for data
-// processing instructions.
-void Decoder::PrintShiftImm(Instruction* instr) {
- int rotate = instr->RotateValue() * 2;
- int immed8 = instr->Immed8Value();
- int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
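-  // For example (illustrative): immed8 = 0x3F with a rotate field of 15
-  // gives rotate = 30, so imm = (0x3F >> 30) | (0x3F << 2) = 252.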
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d", imm);
-}
-
-
-// Print the optional shift and immediate used by saturating instructions.
-void Decoder::PrintShiftSat(Instruction* instr) {
- int shift = instr->Bits(11, 7);
- if (shift > 0) {
-    out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                    ", %s #%d",
-                                    shift_names[instr->Bit(6) * 2],
-                                    shift);
- }
-}
-
-
-// Print PU formatting to reduce complexity of FormatOption.
-void Decoder::PrintPU(Instruction* instr) {
- switch (instr->PUField()) {
- case da_x: {
- Print("da");
- break;
- }
- case ia_x: {
- Print("ia");
- break;
- }
- case db_x: {
- Print("db");
- break;
- }
- case ib_x: {
- Print("ib");
- break;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
-// the FormatOption method.
-void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
- switch (svc) {
- case kCallRtRedirected:
- Print("call rt redirected");
- return;
- case kBreakpoint:
- Print("breakpoint");
- return;
- default:
- if (svc >= kStopCode) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d - 0x%x",
- svc & kStopCodeMask,
- svc & kStopCodeMask);
- } else {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- svc);
- }
- return;
- }
-}
-
-
-// Handle all register based formatting in this function to reduce the
-// complexity of FormatOption.
-int Decoder::FormatRegister(Instruction* instr, const char* format) {
- ASSERT(format[0] == 'r');
- if (format[1] == 'n') { // 'rn: Rn register
- int reg = instr->RnValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 'd') { // 'rd: Rd register
- int reg = instr->RdValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 's') { // 'rs: Rs register
- int reg = instr->RsValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 'm') { // 'rm: Rm register
- int reg = instr->RmValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 't') { // 'rt: Rt register
- int reg = instr->RtValue();
- PrintRegister(reg);
- return 2;
- } else if (format[1] == 'l') {
- // 'rlist: register list for load and store multiple instructions
- ASSERT(STRING_STARTS_WITH(format, "rlist"));
- int rlist = instr->RlistValue();
- int reg = 0;
- Print("{");
- // Print register list in ascending order, by scanning the bit mask.
- while (rlist != 0) {
- if ((rlist & 1) != 0) {
- PrintRegister(reg);
- if ((rlist >> 1) != 0) {
- Print(", ");
- }
- }
- reg++;
- rlist >>= 1;
- }
- Print("}");
- return 5;
- }
- UNREACHABLE();
- return -1;
-}
-
-
-// Handle all VFP register based formatting in this function to reduce the
-// complexity of FormatOption.
-int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
- ASSERT((format[0] == 'S') || (format[0] == 'D'));
-
- VFPRegPrecision precision =
- format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
-
- int retval = 2;
- int reg = -1;
- if (format[1] == 'n') {
- reg = instr->VFPNRegValue(precision);
- } else if (format[1] == 'm') {
- reg = instr->VFPMRegValue(precision);
- } else if (format[1] == 'd') {
- if ((instr->TypeValue() == 7) &&
- (instr->Bit(24) == 0x0) &&
- (instr->Bits(11, 9) == 0x5) &&
- (instr->Bit(4) == 0x1)) {
- // vmov.32 has Vd in a different place.
- reg = instr->Bits(19, 16) | (instr->Bit(7) << 4);
- } else {
- reg = instr->VFPDRegValue(precision);
- }
-
-    if (format[2] == '+') {
-      int immed8 = instr->Immed8Value();
-      if (format[0] == 'S') reg += immed8 - 1;
-      if (format[0] == 'D') reg += (immed8 / 2 - 1);
-      retval = 3;
-    }
- } else {
- UNREACHABLE();
- }
-
- if (precision == kSinglePrecision) {
- PrintSRegister(reg);
- } else {
- PrintDRegister(reg);
- }
-
- return retval;
-}
-
-
-int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
- Print(format);
- return 0;
-}
-
-
-// Print the movw or movt instruction.
-void Decoder::PrintMovwMovt(Instruction* instr) {
- int imm = instr->ImmedMovwMovtValue();
- int rd = instr->RdValue();
- PrintRegister(rd);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", #%d", imm);
-}
-
-
-// FormatOption takes a formatting string and interprets it based on
-// the current instruction. The format string points to the first
-// character of the option string (the option escape has already been
-// consumed by the caller.) FormatOption returns the number of
-// characters that were consumed from the formatting string.
-int Decoder::FormatOption(Instruction* instr, const char* format) {
- switch (format[0]) {
- case 'a': { // 'a: accumulate multiplies
- if (instr->Bit(21) == 0) {
- Print("ul");
- } else {
- Print("la");
- }
- return 1;
- }
- case 'b': { // 'b: byte loads or stores
- if (instr->HasB()) {
- Print("b");
- }
- return 1;
- }
- case 'c': { // 'cond: conditional execution
- ASSERT(STRING_STARTS_WITH(format, "cond"));
- PrintCondition(instr);
- return 4;
- }
- case 'd': { // 'd: vmov double immediate.
- double d = instr->DoubleImmedVmov();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%g", d);
- return 1;
- }
- case 'f': { // 'f: bitfield instructions - v7 and above.
- uint32_t lsbit = instr->Bits(11, 7);
- uint32_t width = instr->Bits(20, 16) + 1;
- if (instr->Bit(21) == 0) {
- // BFC/BFI:
-        // Bits 20-16 represent the most significant bit. Convert to width.
- width -= lsbit;
- ASSERT(width > 0);
- }
- ASSERT((width + lsbit) <= 32);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d, #%d", lsbit, width);
- return 1;
- }
- case 'h': { // 'h: halfword operation for extra loads and stores
- if (instr->HasH()) {
- Print("h");
- } else {
- Print("b");
- }
- return 1;
- }
- case 'i': { // 'i: immediate value from adjacent bits.
-      // Expects tokens in the form imm%02d@%02d, e.g. imm05@07, imm10@16
- int width = (format[3] - '0') * 10 + (format[4] - '0');
- int lsb = (format[6] - '0') * 10 + (format[7] - '0');
-
- ASSERT((width >= 1) && (width <= 32));
- ASSERT((lsb >= 0) && (lsb <= 31));
- ASSERT((width + lsb) <= 32);
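-      // For example (illustrative): the token imm05@07 gives width = 5 and
-      // lsb = 7, so instr->Bits(11, 7) is printed and the 8 characters of
-      // "imm05@07" are consumed.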
-
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- instr->Bits(width + lsb - 1, lsb));
- return 8;
- }
- case 'l': { // 'l: branch and link
- if (instr->HasLink()) {
- Print("l");
- }
- return 1;
- }
- case 'm': {
- if (format[1] == 'w') {
- // 'mw: movt/movw instructions.
- PrintMovwMovt(instr);
- return 2;
- }
- if (format[1] == 'e') { // 'memop: load/store instructions.
- ASSERT(STRING_STARTS_WITH(format, "memop"));
- if (instr->HasL()) {
- Print("ldr");
- } else {
- if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0) &&
- (instr->Bits(7, 6) == 3) && (instr->Bit(4) == 1)) {
- if (instr->Bit(5) == 1) {
- Print("strd");
- } else {
- Print("ldrd");
- }
- return 5;
- }
- Print("str");
- }
- return 5;
- }
- // 'msg: for simulator break instructions
- ASSERT(STRING_STARTS_WITH(format, "msg"));
- byte* str =
- reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s", converter_.NameInCode(str));
- return 3;
- }
- case 'o': {
- if ((format[3] == '1') && (format[4] == '2')) {
- // 'off12: 12-bit offset for load and store instructions
- ASSERT(STRING_STARTS_WITH(format, "off12"));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", instr->Offset12Value());
- return 5;
- } else if (format[3] == '0') {
- // 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
- ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- (instr->Bits(19, 8) << 4) +
- instr->Bits(3, 0));
- return 15;
- }
- // 'off8: 8-bit offset for extra load and store instructions
- ASSERT(STRING_STARTS_WITH(format, "off8"));
- int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", offs8);
- return 4;
- }
- case 'p': { // 'pu: P and U bits for load and store instructions
- ASSERT(STRING_STARTS_WITH(format, "pu"));
- PrintPU(instr);
- return 2;
- }
- case 'r': {
- return FormatRegister(instr, format);
- }
- case 's': {
- if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
- if (format[6] == 'o') { // 'shift_op
- ASSERT(STRING_STARTS_WITH(format, "shift_op"));
- if (instr->TypeValue() == 0) {
- PrintShiftRm(instr);
- } else {
- ASSERT(instr->TypeValue() == 1);
- PrintShiftImm(instr);
- }
- return 8;
- } else if (format[6] == 's') { // 'shift_sat.
- ASSERT(STRING_STARTS_WITH(format, "shift_sat"));
- PrintShiftSat(instr);
- return 9;
- } else { // 'shift_rm
- ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
- PrintShiftRm(instr);
- return 8;
- }
- } else if (format[1] == 'v') { // 'svc
- ASSERT(STRING_STARTS_WITH(format, "svc"));
- PrintSoftwareInterrupt(instr->SvcValue());
- return 3;
- } else if (format[1] == 'i') { // 'sign: signed extra loads and stores
- ASSERT(STRING_STARTS_WITH(format, "sign"));
- if (instr->HasSign()) {
- Print("s");
- }
- return 4;
- }
- // 's: S field of data processing instructions
- if (instr->HasS()) {
- Print("s");
- }
- return 1;
- }
- case 't': { // 'target: target of branch instructions
- ASSERT(STRING_STARTS_WITH(format, "target"));
- int off = (instr->SImmed24Value() << 2) + 8;
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%+d -> %s",
- off,
- converter_.NameOfAddress(
- reinterpret_cast<byte*>(instr) + off));
- return 6;
- }
- case 'u': { // 'u: signed or unsigned multiplies
- // The manual gets the meaning of bit 22 backwards in the multiply
- // instruction overview on page A3.16.2. The instructions that
- // exist in u and s variants are the following:
- // smull A4.1.87
- // umull A4.1.129
- // umlal A4.1.128
- // smlal A4.1.76
-      // For these, 0 means u and 1 means s, as can be seen on their
-      // individual pages. The other 18 mul instructions have the bit set or
-      // unset in arbitrary ways that are unrelated to the signedness of the
-      // instruction.
- // None of these 18 instructions exist in both a 'u' and an 's' variant.
-
- if (instr->Bit(22) == 0) {
- Print("u");
- } else {
- Print("s");
- }
- return 1;
- }
- case 'v': {
- return FormatVFPinstruction(instr, format);
- }
- case 'S':
- case 'D': {
- return FormatVFPRegister(instr, format);
- }
- case 'w': { // 'w: W field of load and store instructions
- if (instr->HasW()) {
- Print("!");
- }
- return 1;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-// Format takes a formatting string for a whole instruction and prints it into
-// the output buffer. All escaped options are handed to FormatOption to be
-// parsed further.
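-// For example (illustrative): with the format string
-// "add'cond's 'rd, 'rn, 'shift_op", an ADD with cond = al and the S bit set
-// could come out as "adds r0, r1, r2, lsl #1" ("al" prints as the empty
-// string, per cond_names).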
-void Decoder::Format(Instruction* instr, const char* format) {
- char cur = *format++;
- while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
- if (cur == '\'') { // Single quote is used as the formatting escape.
- format += FormatOption(instr, format);
- } else {
- out_buffer_[out_buffer_pos_++] = cur;
- }
- cur = *format++;
- }
- out_buffer_[out_buffer_pos_] = '\0';
-}
-
-
-// The disassembler may end up decoding data inlined in the code. We do not want
-// it to crash if the data does not resemble any known instruction.
-#define VERIFY(condition) \
-if (!(condition)) { \
- Unknown(instr); \
- return; \
-}
-
-
-// For currently unimplemented decodings the disassembler calls Unknown(instr)
-// which will just print "unknown" of the instruction bits.
-void Decoder::Unknown(Instruction* instr) {
- Format(instr, "unknown");
-}
-
-
-void Decoder::DecodeType01(Instruction* instr) {
- int type = instr->TypeValue();
- if ((type == 0) && instr->IsSpecialType0()) {
- // multiply instruction or extra loads and stores
- if (instr->Bits(7, 4) == 9) {
- if (instr->Bit(24) == 0) {
- // multiply instructions
- if (instr->Bit(23) == 0) {
- if (instr->Bit(21) == 0) {
- // The MUL instruction description (A 4.1.33) refers to Rd as being
- // the destination for the operation, but it confusingly uses the
- // Rn field to encode it.
- Format(instr, "mul'cond's 'rn, 'rm, 'rs");
- } else {
- if (instr->Bit(22) == 0) {
- // The MLA instruction description (A 4.1.28) refers to the order
- // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
- // Rn field to encode the Rd register and the Rd field to encode
- // the Rn register.
- Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
- } else {
- // The MLS instruction description (A 4.1.29) refers to the order
- // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
- // Rn field to encode the Rd register and the Rd field to encode
- // the Rn register.
- Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
- }
- }
- } else {
- // The signed/long multiply instructions use the terms RdHi and RdLo
- // when referring to the target registers. They are mapped to the Rn
- // and Rd fields as follows:
- // RdLo == Rd field
- // RdHi == Rn field
- // The order of registers is: <RdLo>, <RdHi>, <Rm>, <Rs>
- Format(instr, "'um'al'cond's 'rd, 'rn, 'rm, 'rs");
- }
- } else {
- Unknown(instr); // not used by V8
- }
- } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
- // ldrd, strd
- switch (instr->PUField()) {
- case da_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond's 'rd, ['rn], -'rm");
- } else {
- Format(instr, "'memop'cond's 'rd, ['rn], #-'off8");
- }
- break;
- }
- case ia_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond's 'rd, ['rn], +'rm");
- } else {
- Format(instr, "'memop'cond's 'rd, ['rn], #+'off8");
- }
- break;
- }
- case db_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w");
- } else {
- Format(instr, "'memop'cond's 'rd, ['rn, #-'off8]'w");
- }
- break;
- }
- case ib_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w");
- } else {
- Format(instr, "'memop'cond's 'rd, ['rn, #+'off8]'w");
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
- } else {
- // extra load/store instructions
- switch (instr->PUField()) {
- case da_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
- } else {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
- }
- break;
- }
- case ia_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
- } else {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
- }
- break;
- }
- case db_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
- } else {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
- }
- break;
- }
- case ib_x: {
- if (instr->Bit(22) == 0) {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
- } else {
- Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
- return;
- }
- } else if ((type == 0) && instr->IsMiscType0()) {
- if (instr->Bits(22, 21) == 1) {
- switch (instr->BitField(7, 4)) {
- case BX:
- Format(instr, "bx'cond 'rm");
- break;
- case BLX:
- Format(instr, "blx'cond 'rm");
- break;
- case BKPT:
- Format(instr, "bkpt 'off0to3and8to19");
- break;
- default:
- Unknown(instr); // not used by V8
- break;
- }
- } else if (instr->Bits(22, 21) == 3) {
- switch (instr->BitField(7, 4)) {
- case CLZ:
- Format(instr, "clz'cond 'rd, 'rm");
- break;
- default:
- Unknown(instr); // not used by V8
- break;
- }
- } else {
- Unknown(instr); // not used by V8
- }
- } else if ((type == 1) && instr->IsNopType1()) {
- Format(instr, "nop'cond");
- } else {
- switch (instr->OpcodeField()) {
- case AND: {
- Format(instr, "and'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case EOR: {
- Format(instr, "eor'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case SUB: {
- Format(instr, "sub'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case RSB: {
- Format(instr, "rsb'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case ADD: {
- Format(instr, "add'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case ADC: {
- Format(instr, "adc'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case SBC: {
- Format(instr, "sbc'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case RSC: {
- Format(instr, "rsc'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case TST: {
- if (instr->HasS()) {
- Format(instr, "tst'cond 'rn, 'shift_op");
- } else {
- Format(instr, "movw'cond 'mw");
- }
- break;
- }
- case TEQ: {
- if (instr->HasS()) {
- Format(instr, "teq'cond 'rn, 'shift_op");
- } else {
- // Other instructions matching this pattern are handled in the
- // miscellaneous instructions part above.
- UNREACHABLE();
- }
- break;
- }
- case CMP: {
- if (instr->HasS()) {
- Format(instr, "cmp'cond 'rn, 'shift_op");
- } else {
- Format(instr, "movt'cond 'mw");
- }
- break;
- }
- case CMN: {
- if (instr->HasS()) {
- Format(instr, "cmn'cond 'rn, 'shift_op");
- } else {
- // Other instructions matching this pattern are handled in the
- // miscellaneous instructions part above.
- UNREACHABLE();
- }
- break;
- }
- case ORR: {
- Format(instr, "orr'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case MOV: {
- Format(instr, "mov'cond's 'rd, 'shift_op");
- break;
- }
- case BIC: {
- Format(instr, "bic'cond's 'rd, 'rn, 'shift_op");
- break;
- }
- case MVN: {
- Format(instr, "mvn'cond's 'rd, 'shift_op");
- break;
- }
- default: {
- // The Opcode field is a 4-bit field.
- UNREACHABLE();
- break;
- }
- }
- }
-}
-
-
-void Decoder::DecodeType2(Instruction* instr) {
- switch (instr->PUField()) {
- case da_x: {
- if (instr->HasW()) {
- Unknown(instr); // not used in V8
- return;
- }
- Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
- break;
- }
- case ia_x: {
- if (instr->HasW()) {
- Unknown(instr); // not used in V8
- return;
- }
- Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
- break;
- }
- case db_x: {
- Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
- break;
- }
- case ib_x: {
- Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void Decoder::DecodeType3(Instruction* instr) {
- switch (instr->PUField()) {
- case da_x: {
- VERIFY(!instr->HasW());
- Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
- break;
- }
- case ia_x: {
- if (instr->HasW()) {
- VERIFY(instr->Bits(5, 4) == 0x1);
- if (instr->Bit(22) == 0x1) {
- Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
- } else {
- UNREACHABLE(); // SSAT.
- }
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
- }
- break;
- }
- case db_x: {
- if (FLAG_enable_sudiv) {
- if (!instr->HasW()) {
- if (instr->Bits(5, 4) == 0x1) {
- if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
- // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
- Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
- break;
- }
- }
- }
- }
- Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
- break;
- }
- case ib_x: {
- if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
- uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
- uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
- uint32_t msbit = widthminus1 + lsbit;
- if (msbit <= 31) {
- if (instr->Bit(22)) {
- Format(instr, "ubfx'cond 'rd, 'rm, 'f");
- } else {
- Format(instr, "sbfx'cond 'rd, 'rm, 'f");
- }
- } else {
- UNREACHABLE();
- }
- } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
- uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
- uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
- if (msbit >= lsbit) {
- if (instr->RmValue() == 15) {
- Format(instr, "bfc'cond 'rd, 'f");
- } else {
- Format(instr, "bfi'cond 'rd, 'rm, 'f");
- }
- } else {
- UNREACHABLE();
- }
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void Decoder::DecodeType4(Instruction* instr) {
- if (instr->Bit(22) != 0) {
- // Privileged mode currently not supported.
- Unknown(instr);
- } else {
- if (instr->HasL()) {
- Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
- } else {
- Format(instr, "stm'cond'pu 'rn'w, 'rlist");
- }
- }
-}
-
-
-void Decoder::DecodeType5(Instruction* instr) {
- Format(instr, "b'l'cond 'target");
-}
-
-
-void Decoder::DecodeType6(Instruction* instr) {
- DecodeType6CoprocessorIns(instr);
-}
-
-
-int Decoder::DecodeType7(Instruction* instr) {
- if (instr->Bit(24) == 1) {
- if (instr->SvcValue() >= kStopCode) {
- Format(instr, "stop'cond 'svc");
- // Also print the stop message. Its address is encoded
- // in the following 4 bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "\n %p %08x stop message: %s",
- reinterpret_cast<int32_t*>(instr
- + Instruction::kInstrSize),
- *reinterpret_cast<char**>(instr
- + Instruction::kInstrSize),
- *reinterpret_cast<char**>(instr
- + Instruction::kInstrSize));
- // We have decoded 2 * Instruction::kInstrSize bytes.
- return 2 * Instruction::kInstrSize;
- } else {
- Format(instr, "svc'cond 'svc");
- }
- } else {
- DecodeTypeVFP(instr);
- }
- return Instruction::kInstrSize;
-}
-
-
-// void Decoder::DecodeTypeVFP(Instruction* instr)
-// vmov: Sn = Rt
-// vmov: Rt = Sn
-// vcvt: Dd = Sm
-// vcvt: Sd = Dm
-// Dd = vabs(Dm)
-// Dd = vneg(Dm)
-// Dd = vadd(Dn, Dm)
-// Dd = vsub(Dn, Dm)
-// Dd = vmul(Dn, Dm)
-// Dd = vmla(Dn, Dm)
-// Dd = vmls(Dn, Dm)
-// Dd = vdiv(Dn, Dm)
-// vcmp(Dd, Dm)
-// vmrs
-// vmsr
-// Dd = vsqrt(Dm)
-void Decoder::DecodeTypeVFP(Instruction* instr) {
-  VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0));
- VERIFY(instr->Bits(11, 9) == 0x5);
-
- if (instr->Bit(4) == 0) {
- if (instr->Opc1Value() == 0x7) {
- // Other data processing instructions
- if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
- // vmov register to register.
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmov'cond.f64 'Dd, 'Dm");
- } else {
- Format(instr, "vmov'cond.f32 'Sd, 'Sm");
- }
- } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
- // vabs
- Format(instr, "vabs'cond.f64 'Dd, 'Dm");
- } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
- // vneg
- Format(instr, "vneg'cond.f64 'Dd, 'Dm");
- } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
- DecodeVCVTBetweenDoubleAndSingle(instr);
- } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
- DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Value() >> 1) == 0x6) &&
- (instr->Opc3Value() & 0x1)) {
- DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
- (instr->Opc3Value() & 0x1)) {
- DecodeVCMP(instr);
- } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
- Format(instr, "vsqrt'cond.f64 'Dd, 'Dm");
- } else if (instr->Opc3Value() == 0x0) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmov'cond.f64 'Dd, 'd");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if (instr->Opc1Value() == 0x3) {
- if (instr->SzValue() == 0x1) {
- if (instr->Opc3Value() & 0x1) {
- Format(instr, "vsub'cond.f64 'Dd, 'Dn, 'Dm");
- } else {
- Format(instr, "vadd'cond.f64 'Dd, 'Dn, 'Dm");
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmul'cond.f64 'Dd, 'Dn, 'Dm");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmla'cond.f64 'Dd, 'Dn, 'Dm");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if ((instr->Opc1Value() == 0x0) && (instr->Opc3Value() & 0x1)) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmls'cond.f64 'Dd, 'Dn, 'Dm");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vdiv'cond.f64 'Dd, 'Dn, 'Dm");
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
- } else {
- if ((instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0)) {
- DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
- } else if ((instr->VLValue() == 0x0) &&
- (instr->VCValue() == 0x1) &&
- (instr->Bit(23) == 0x0)) {
- if (instr->Bit(21) == 0x0) {
- Format(instr, "vmov'cond.32 'Dd[0], 'rt");
- } else {
- Format(instr, "vmov'cond.32 'Dd[1], 'rt");
- }
- } else if ((instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x7) &&
- (instr->Bits(19, 16) == 0x1)) {
- if (instr->VLValue() == 0) {
- if (instr->Bits(15, 12) == 0xF) {
- Format(instr, "vmsr'cond FPSCR, APSR");
- } else {
- Format(instr, "vmsr'cond FPSCR, 'rt");
- }
- } else {
- if (instr->Bits(15, 12) == 0xF) {
- Format(instr, "vmrs'cond APSR, FPSCR");
- } else {
- Format(instr, "vmrs'cond 'rt, FPSCR");
- }
- }
- }
- }
-}
-
-
-void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
- Instruction* instr) {
- VERIFY((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0));
-
- bool to_arm_register = (instr->VLValue() == 0x1);
-
- if (to_arm_register) {
- Format(instr, "vmov'cond 'rt, 'Sn");
- } else {
- Format(instr, "vmov'cond 'Sn, 'rt");
- }
-}
-
-
-void Decoder::DecodeVCMP(Instruction* instr) {
- VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- VERIFY(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
- (instr->Opc3Value() & 0x1));
-
- // Comparison.
- bool dp_operation = (instr->SzValue() == 1);
- bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
-
- if (dp_operation && !raise_exception_for_qnan) {
- if (instr->Opc2Value() == 0x4) {
- Format(instr, "vcmp'cond.f64 'Dd, 'Dm");
- } else if (instr->Opc2Value() == 0x5) {
- Format(instr, "vcmp'cond.f64 'Dd, #0.0");
- } else {
- Unknown(instr); // invalid
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
-}
-
-
-void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
- VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- VERIFY((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
-
- bool double_to_single = (instr->SzValue() == 1);
-
- if (double_to_single) {
- Format(instr, "vcvt'cond.f32.f64 'Sd, 'Dm");
- } else {
- Format(instr, "vcvt'cond.f64.f32 'Dd, 'Sm");
- }
-}
-
-
-void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
- VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- VERIFY(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
- (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
-
- bool to_integer = (instr->Bit(18) == 1);
- bool dp_operation = (instr->SzValue() == 1);
- if (to_integer) {
- bool unsigned_integer = (instr->Bit(16) == 0);
-
- if (dp_operation) {
- if (unsigned_integer) {
- Format(instr, "vcvt'cond.u32.f64 'Sd, 'Dm");
- } else {
- Format(instr, "vcvt'cond.s32.f64 'Sd, 'Dm");
- }
- } else {
- if (unsigned_integer) {
- Format(instr, "vcvt'cond.u32.f32 'Sd, 'Sm");
- } else {
- Format(instr, "vcvt'cond.s32.f32 'Sd, 'Sm");
- }
- }
- } else {
- bool unsigned_integer = (instr->Bit(7) == 0);
-
- if (dp_operation) {
- if (unsigned_integer) {
- Format(instr, "vcvt'cond.f64.u32 'Dd, 'Sm");
- } else {
- Format(instr, "vcvt'cond.f64.s32 'Dd, 'Sm");
- }
- } else {
- if (unsigned_integer) {
- Format(instr, "vcvt'cond.f32.u32 'Sd, 'Sm");
- } else {
- Format(instr, "vcvt'cond.f32.s32 'Sd, 'Sm");
- }
- }
- }
-}
-
-
-// Decode Type 6 coprocessor instructions.
-// Dm = vmov(Rt, Rt2)
-// <Rt, Rt2> = vmov(Dm)
-// Ddst = MEM(Rbase + 4*offset).
-// MEM(Rbase + 4*offset) = Dsrc.
-void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
- VERIFY(instr->TypeValue() == 6);
-
- if (instr->CoprocessorValue() == 0xA) {
- switch (instr->OpcodeValue()) {
- case 0x8:
- case 0xA:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Sd, ['rn - 4*'imm08@00]");
- } else {
- Format(instr, "vstr'cond 'Sd, ['rn - 4*'imm08@00]");
- }
- break;
- case 0xC:
- case 0xE:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Sd, ['rn + 4*'imm08@00]");
- } else {
- Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
- }
- break;
- case 0x4:
- case 0x5:
- case 0x6:
- case 0x7:
- case 0x9:
- case 0xB: {
- bool to_vfp_register = (instr->VLValue() == 0x1);
- if (to_vfp_register) {
- Format(instr, "vldm'cond'pu 'rn'w, {'Sd-'Sd+}");
- } else {
- Format(instr, "vstm'cond'pu 'rn'w, {'Sd-'Sd+}");
- }
- break;
- }
- default:
- Unknown(instr); // Not used by V8.
- }
- } else if (instr->CoprocessorValue() == 0xB) {
- switch (instr->OpcodeValue()) {
- case 0x2:
- // Load and store double to two GP registers
- if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
- Unknown(instr); // Not used by V8.
- } else if (instr->HasL()) {
- Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
- } else {
- Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
- }
- break;
- case 0x8:
- case 0xA:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
- } else {
- Format(instr, "vstr'cond 'Dd, ['rn - 4*'imm08@00]");
- }
- break;
- case 0xC:
- case 0xE:
- if (instr->HasL()) {
- Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
- } else {
- Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
- }
- break;
- case 0x4:
- case 0x5:
- case 0x6:
- case 0x7:
- case 0x9:
- case 0xB: {
- bool to_vfp_register = (instr->VLValue() == 0x1);
- if (to_vfp_register) {
- Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
- } else {
- Format(instr, "vstm'cond'pu 'rn'w, {'Dd-'Dd+}");
- }
- break;
- }
- default:
- Unknown(instr); // Not used by V8.
- }
- } else {
- Unknown(instr); // Not used by V8.
- }
-}
-
-#undef VERIFY
-
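-// A minimal usage sketch (illustrative): a caller walking code can test
-// IsConstantPoolAt(pc) before decoding and, when it returns true, skip the
-// constant pool data rather than disassemble it as instructions.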
-bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
- int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- return (instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker;
-}
-
-
-int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
- if (IsConstantPoolAt(instr_ptr)) {
- int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- return DecodeConstantPoolLength(instruction_bits);
- } else {
- return -1;
- }
-}
-
-
-// Disassemble the instruction at *instr_ptr into the output buffer.
-int Decoder::InstructionDecode(byte* instr_ptr) {
- Instruction* instr = Instruction::At(instr_ptr);
- // Print raw instruction bytes.
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
- instr->InstructionBits());
- if (instr->ConditionField() == kSpecialCondition) {
- Unknown(instr);
- return Instruction::kInstrSize;
- }
- int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
- out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "constant pool begin (length %d)",
- DecodeConstantPoolLength(instruction_bits));
- return Instruction::kInstrSize;
- }
- switch (instr->TypeValue()) {
- case 0:
- case 1: {
- DecodeType01(instr);
- break;
- }
- case 2: {
- DecodeType2(instr);
- break;
- }
- case 3: {
- DecodeType3(instr);
- break;
- }
- case 4: {
- DecodeType4(instr);
- break;
- }
- case 5: {
- DecodeType5(instr);
- break;
- }
- case 6: {
- DecodeType6(instr);
- break;
- }
- case 7: {
- return DecodeType7(instr);
- }
- default: {
- // The type field is 3-bits in the ARM encoding.
- UNREACHABLE();
- break;
- }
- }
- return Instruction::kInstrSize;
-}
-
-
-} } // namespace v8::internal
-
-
-
-//------------------------------------------------------------------------------
-
-namespace disasm {
-
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::Registers::Name(reg);
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
- UNREACHABLE(); // ARM does not have the concept of a byte register
- return "nobytereg";
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
- UNREACHABLE(); // ARM does not have any XMM registers
- return "noxmmreg";
-}
-
-
-const char* NameConverter::NameInCode(byte* addr) const {
-  // The default name converter is called for unknown code, so we will not
-  // try to access any memory.
- return "";
-}
-
-
-//------------------------------------------------------------------------------
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte* instruction) {
- v8::internal::Decoder d(converter_, buffer);
- return d.InstructionDecode(instruction);
-}
-
-
-int Disassembler::ConstantPoolSizeAt(byte* instruction) {
- return v8::internal::Decoder::ConstantPoolSizeAt(instruction);
-}
-
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
- NameConverter converter;
- Disassembler d(converter);
- for (byte* pc = begin; pc < end;) {
- v8::internal::EmbeddedVector<char, 128> buffer;
- buffer[0] = '\0';
- byte* prev_pc = pc;
- pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
- }
-}
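-
-// A minimal usage sketch (begin/end are placeholders for a readable
-// instruction range):
-//
-//   disasm::NameConverter converter;
-//   disasm::Disassembler d(converter);
-//   d.Disassemble(stdout, begin, end);
-//
-// Each output line shows the instruction address, the raw instruction bits
-// and the mnemonic produced by Decoder::InstructionDecode.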
-
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/frames-arm.cc b/src/3rdparty/v8/src/arm/frames-arm.cc
deleted file mode 100644
index a805d28..0000000
--- a/src/3rdparty/v8/src/arm/frames-arm.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
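-
-// ExitFrameConstants::kSPOffset is -1 * kPointerSize (see frames-arm.h), so
-// this reads the saved stack pointer from the word just below fp.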
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/frames-arm.h b/src/3rdparty/v8/src/arm/frames-arm.h
deleted file mode 100644
index ee9fc0e..0000000
--- a/src/3rdparty/v8/src/arm/frames-arm.h
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_FRAMES_ARM_H_
-#define V8_ARM_FRAMES_ARM_H_
-
-namespace v8 {
-namespace internal {
-
-
-// The ARM ABI does not specify the usage of register r9, which may be reserved
-// as the static base or thread register on some platforms, in which case we
-// leave it alone. Adjust the value of kR9Available accordingly:
-const int kR9Available = 1; // 1 if available to us, 0 if reserved
-
-
-// Register list in load/store instructions
-// Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 16;
-
-
-// Caller-saved/arguments registers
-const RegList kJSCallerSaved =
- 1 << 0 | // r0 a1
- 1 << 1 | // r1 a2
- 1 << 2 | // r2 a3
- 1 << 3; // r3 a4
-
-const int kNumJSCallerSaved = 4;
-
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
-// Return the code of the n-th caller-saved register available to JavaScript,
-// e.g. JSCallerSavedCode(0) returns r0.code() == 0.
-int JSCallerSavedCode(int n);
-
-
-// Callee-saved registers preserved when switching from C to JavaScript
-const RegList kCalleeSaved =
- 1 << 4 | // r4 v1
- 1 << 5 | // r5 v2
- 1 << 6 | // r6 v3
- 1 << 7 | // r7 v4
- 1 << 8 | // r8 v5 (cp in JavaScript code)
- kR9Available << 9 | // r9 v6
- 1 << 10 | // r10 v7
- 1 << 11; // r11 v8 (fp in JavaScript code)
-
-// When calling into C++ (only for C++ calls that can't cause a GC).
-// The call code will take care of lr, fp, etc.
-const RegList kCallerSaved =
- 1 << 0 | // r0
- 1 << 1 | // r1
- 1 << 2 | // r2
- 1 << 3 | // r3
- 1 << 9; // r9
-
-
-const int kNumCalleeSaved = 7 + kR9Available;
-
-// Double registers d8 to d15 are callee-saved.
-const int kNumDoubleCalleeSaved = 8;
-
-
-// Number of registers for which space is reserved in safepoints. Must be a
-// multiple of 8.
-// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
-const int kNumSafepointRegisters = 16;
-
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
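-
-// Sanity arithmetic for the counts above (with kR9Available == 1):
-//   kNumCalleeSaved             == 7 + 1 == 8
-//   kNumSafepointSavedRegisters == 4 + 8 == 12 <= kNumSafepointRegisters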
-
-// ----------------------------------------------------
-
-
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
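-
-// With 4-byte pointers this makes a stack handler five words (20 bytes):
-// next, code, state, context and fp, i.e. kSize == 5 * kPointerSize.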
-
-
-class EntryFrameConstants : public AllStatic {
- public:
- static const int kCallerFPOffset = -3 * kPointerSize;
-};
-
-
-class ExitFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
-
- // The caller fields are at positive offsets from the frame pointer.
- static const int kCallerFPOffset = 0 * kPointerSize;
- // The caller's pc is saved right above the caller's fp.
- static const int kCallerPCOffset = 1 * kPointerSize;
-
- // FP-relative displacement of the caller's SP. It points just past
- // the saved PC.
- static const int kCallerSPDisplacement = 2 * kPointerSize;
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-};
-
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_FRAMES_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/full-codegen-arm.cc b/src/3rdparty/v8/src/arm/full-codegen-arm.cc
deleted file mode 100644
index 36580c7..0000000
--- a/src/3rdparty/v8/src/arm/full-codegen-arm.cc
+++ /dev/null
@@ -1,4622 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-#include "arm/code-stubs-arm.h"
-#include "arm/macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-
-// A patch site is a location in the code that can be patched. This class
-// has a number of methods to emit the patchable code and the method
-// EmitPatchInfo to record a marker back to the patchable code. This marker
-// is a cmp rx, #yyy instruction, where x * 0x00000fff + yyy (using the raw
-// 12-bit immediate value) is the delta from the pc to the first instruction
-// of the patchable code.
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- ASSERT(patch_site_.is_bound() == info_emitted_);
- }
-
- // When initially emitting this, ensure that a jump is always generated to
- // skip the inlined smi code.
- void EmitJumpIfNotSmi(Register reg, Label* target) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ bind(&patch_site_);
- __ cmp(reg, Operand(reg));
- __ b(eq, target); // Always taken before patched.
- }
-
- // When initially emitting this, ensure that a jump is never generated to
- // skip the inlined smi code.
- void EmitJumpIfSmi(Register reg, Label* target) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ bind(&patch_site_);
- __ cmp(reg, Operand(reg));
- __ b(ne, target); // Never taken before patched.
- }
-
- void EmitPatchInfo() {
- // Block literal pool emission whilst recording patch site information.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- if (patch_site_.is_bound()) {
- int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
- Register reg;
- reg.set_code(delta_to_patch_site / kOff12Mask);
- __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- } else {
- __ nop(); // Signals no inlined code.
- }
- }
-
- private:
- MacroAssembler* masm_;
- Label patch_site_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
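-
-// A worked example of the patch-site encoding (illustrative values only):
-// with kOff12Mask == 0xfff, a delta of 10 instructions is recorded as
-//   cmp r0, #10      (10 / 0xfff == 0, 10 % 0xfff == 10)
-// while a delta of 5000 would become
-//   cmp r1, #905     (5000 / 0xfff == 1, 5000 % 0xfff == 905)
-// and the patcher recovers the delta as code * 0xfff + immediate.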
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right. The actual
-// argument count matches the formal parameter count expected by the
-// function.
-//
-// The live registers are:
-// o r1: the JS function object being called (i.e., ourselves)
-// o cp: our context
-// o fp: our caller's frame pointer
-// o sp: stack pointer
-// o lr: return address
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-arm.h for its layout.
-void FullCodeGenerator::Generate() {
- CompilationInfo* info = info_;
- handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ function compiled by full code generator");
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop-at");
- }
-#endif
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
- Label ok;
- __ cmp(r5, Operand::Zero());
- __ b(eq, &ok);
- int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
- }
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- int locals_count = info->scope()->num_stack_slots();
-
- info->set_prologue_offset(masm_->pc_offset());
- {
- PredictableCodeSizeScope predictable_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- }
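-
-  // After the prologue above, fp points at the saved fp slot and the frame
-  // has the standard layout described in frames-arm.h:
-  //   fp + kPointerSize     : lr (return address)
-  //   fp                    : caller's fp
-  //   fp - kPointerSize     : cp (context)
-  //   fp - 2 * kPointerSize : r1 (the function)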
-
- { Comment cmnt(masm_, "[ Allocate locals");
- for (int i = 0; i < locals_count; i++) {
- __ push(ip);
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
- // Argument to NewContext is the function, which is still in r1.
- Comment cmnt(masm_, "[ Allocate context");
- __ push(r1);
- if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
- __ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- function_in_register = false;
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
- __ str(r0, target);
-
- // Update the write barrier.
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- }
- }
-
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
- // Load this again, if it's used by the local context below.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(r3, r1);
- }
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ add(r2, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r1, Operand(Smi::FromInt(num_parameters)));
- __ Push(r3, r2, r1);
-
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite the receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
- }
- ArgumentsAccessStub stub(type);
- __ CallStub(&stub);
-
- SetVar(arguments, r0, r1, r2);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
- ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
- VisitVariableDeclaration(function);
- }
- VisitDeclarations(scope()->declarations());
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- }
- EmitReturnSequence();
-
- // Force emit the constant pool, so it doesn't get emitted in the middle
- // of the stack check table.
- masm()->CheckConstPool(true, false);
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ mov(r0, Operand(Smi::FromInt(0)));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ mov(r2, Operand(profiling_counter_));
- __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
- __ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
- int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- if (isolate()->IsDebuggerActive()) {
- // Detect debug break requests as soon as possible.
- reset_value = FLAG_interrupt_budget >> 4;
- }
- __ mov(r2, Operand(profiling_counter_));
- __ mov(r3, Operand(Smi::FromInt(reset_value)));
- __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Back edge bookkeeping");
- // Block literal pools whilst emitting stack check code.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- Label ok;
-
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ b(pl, &ok);
- InterruptStub stub;
- __ CallStub(&stub);
-
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
-
- EmitProfilingCounterReset();
-
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ b(&return_label_);
- } else {
- __ bind(&return_label_);
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ b(pl, &ok);
- __ push(r0);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(r2);
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- InterruptStub stub;
- __ CallStub(&stub);
- }
- __ pop(r0);
- EmitProfilingCounterReset();
- __ bind(&ok);
- }
-
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- // Make sure that the constant pool is not emitted inside the return
- // sequence.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
- // Here we use masm_-> instead of the __ macro to prevent the code
- // coverage tool from instrumenting these instructions, as we rely on
- // their exact size here.
- int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
- PredictableCodeSizeScope predictable(masm_, -1);
- __ RecordJSReturn();
- masm_->mov(sp, fp);
- masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
- masm_->add(sp, sp, Operand(sp_delta));
- masm_->Jump(lr);
- }
-
-#ifdef DEBUG
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceInstructions <=
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
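-
-// A worked example for the return sequence above: sp_delta removes the
-// receiver plus all parameters, so a function with two parameters pops
-// (2 + 1) * kPointerSize == 12 bytes before jumping to lr.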
-
-
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (index == Heap::kUndefinedValueRootIndex ||
- index == Heap::kNullValueRootIndex ||
- index == Heap::kFalseValueRootIndex) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else if (index == Heap::kTrueValueRootIndex) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else {
- __ LoadRoot(result_register(), index);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- __ mov(result_register(), Operand(lit));
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- // Immediates cannot be pushed directly.
- __ mov(result_register(), Operand(lit));
- __ push(result_register());
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else {
- if (true_label_ != fall_through_) __ b(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ b(false_label_);
- } else {
- if (true_label_ != fall_through_) __ b(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ mov(result_register(), Operand(lit));
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp, 0));
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
- __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(materialize_false);
- __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(&done);
- __ bind(materialize_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(result_register(), value_root_index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(ip, value_root_index);
- __ push(ip);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) __ b(true_label_);
- } else {
- if (false_label_ != fall_through_) __ b(false_label_);
- }
-}
-
-
-void FullCodeGenerator::DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub, condition->test_id());
- __ tst(result_register(), result_register());
- Split(ne, if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cond,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ b(cond, if_true);
- } else if (if_true == fall_through) {
- __ b(NegateCondition(cond), if_false);
- } else {
- __ b(cond, if_true);
- __ b(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::StackOperand(Variable* var) {
- ASSERT(var->IsStackAllocated());
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -var->index() * kPointerSize;
- // Adjust by a (parameter or local) base offset.
- if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
- } else {
- offset += JavaScriptFrameConstants::kLocal0Offset;
- }
- return MemOperand(fp, offset);
-}
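-
-// A worked example (assuming the standard frame layout from frames-arm.h):
-// in a function with two parameters, parameter 1 (the last one) has
-//   offset == -1 * kPointerSize + (2 + 1) * kPointerSize
-//          == +2 * kPointerSize == kLastParameterOffset,
-// while local 0 lands at kLocal0Offset below the frame pointer.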
-
-
-MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- if (var->IsContextSlot()) {
- int context_chain_length = scope()->ContextChainLength(var->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
- } else {
- return StackOperand(var);
- }
-}
-
-
-void FullCodeGenerator::GetVar(Register dest, Variable* var) {
- // Use destination as scratch.
- MemOperand location = VarOperand(var, dest);
- __ ldr(dest, location);
-}
-
-
-void FullCodeGenerator::SetVar(Variable* var,
- Register src,
- Register scratch0,
- Register scratch1) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- ASSERT(!scratch0.is(src));
- ASSERT(!scratch0.is(scratch1));
- ASSERT(!scratch1.is(src));
- MemOperand location = VarOperand(var, scratch0);
- __ str(src, location);
-
- // Emit the write barrier code if the location is in the heap.
- if (var->IsContextSlot()) {
- __ RecordWriteContextSlot(scratch0,
- location.offset(),
- src,
- scratch1,
- kLRHasBeenSaved,
- kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
- Label skip;
- if (should_normalize) __ b(&skip);
- PrepareForBailout(expr, TOS_REG);
- if (should_normalize) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current function
- // context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
- // Check that we're not inside a with or catch context.
- __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
- __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
- __ Check(ne, "Declaration in with context.");
- __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
- __ Check(ne, "Declaration in catch context.");
- }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
- VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
- Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- break;
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, StackOperand(variable));
- }
- break;
-
- case Variable::CONTEXT:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, ContextOperand(cp, variable->index()));
- // No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- }
- break;
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ mov(r2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of four modes.
- ASSERT(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ mov(r1, Operand(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- __ Push(cp, r2, r1, r0);
- } else {
- __ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
- __ Push(cp, r2, r1, r0);
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script());
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_->Add(function, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- VisitForAccumulatorValue(declaration->fun());
- __ str(result_register(), StackOperand(variable));
- break;
- }
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- VisitForAccumulatorValue(declaration->fun());
- __ str(result_register(), ContextOperand(cp, variable->index()));
- int offset = Context::SlotOffset(variable->index());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(cp,
- offset,
- result_register(),
- r2,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- break;
- }
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ mov(r2, Operand(variable->name()));
- __ mov(r1, Operand(Smi::FromInt(NONE)));
- __ Push(cp, r2, r1);
- // Push initial value for function declaration.
- VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
-
- // Load instance object.
- __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
- __ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
- __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
-
- // Assign it.
- __ str(r1, ContextOperand(cp, variable->index()));
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(cp,
- Context::SlotOffset(variable->index()),
- r1,
- r3,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
-
- // Traverse into body.
- Visit(declaration->module());
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- // The context is the first argument.
- __ mov(r1, Operand(pairs));
- __ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
- __ Push(cp, r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->Unuse();
-
- // The default is not a test, but remember it as final fall through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ ldr(r1, MemOperand(sp, 0)); // Switch value.
- bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ orr(r2, r1, r0);
- patch_site.EmitJumpIfNotSmi(r2, &slow_case);
-
- __ cmp(r1, r0);
- __ b(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target());
- __ bind(&slow_case);
- }
-
- // Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
- patch_site.EmitPatchInfo();
-
- __ cmp(r0, Operand::Zero());
- __ b(ne, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ b(clause->body_target());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ b(nested_statement.break_label());
- } else {
- __ b(default_clause->body_target());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- Comment cmnt(masm_, "[ ForInStatement");
- SetStatementPosition(stmt);
-
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
- VisitForAccumulatorValue(stmt->enumerable());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, &exit);
- Register null_value = r5;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmp(r0, null_value);
- __ b(eq, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
- // Convert the object to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &done_convert);
- __ bind(&convert);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
- __ push(r0);
-
- // Check for proxies.
- Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- __ b(le, &call_runtime);
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- __ CheckEnumCache(null_value, &call_runtime);
-
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- Label use_cache;
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ b(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(r0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- Label fixed_array;
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r2, ip);
- __ b(ne, &fixed_array);
-
- // We got a map in register r0. Get the enumeration cache from it.
- Label no_descriptors;
- __ bind(&use_cache);
-
- __ EnumLength(r1, r0);
- __ cmp(r1, Operand(Smi::FromInt(0)));
- __ b(eq, &no_descriptors);
-
- __ LoadInstanceDescriptors(r0, r2);
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset));
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Set up the four remaining stack slots.
- __ push(r0); // Map.
- __ mov(r0, Operand(Smi::FromInt(0)));
- // Push enumeration cache, enumeration cache length (as smi) and zero.
- __ Push(r2, r1, r0);
- __ jmp(&loop);
-
- __ bind(&no_descriptors);
- __ Drop(1);
- __ jmp(&exit);
-
- // We got a fixed array in register r0. Iterate through that.
- Label non_proxy;
- __ bind(&fixed_array);
-
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(r1, cell);
- __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ str(r2, FieldMemOperand(r1, JSGlobalPropertyCell::kValueOffset));
-
- __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
- __ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE);
- __ b(gt, &non_proxy);
- __ mov(r1, Operand(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
- __ Push(r1, r0); // Smi and array
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ Push(r1, r0); // Fixed array length (as smi) and initial index.
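-
-  // The for-in loop state now occupies five stack slots (released again by
-  // the Drop(5) below):
-  //   sp[0] : current index (smi)
-  //   sp[1] : array length (smi)
-  //   sp[2] : fixed array of keys
-  //   sp[3] : expected map, or a smi (1 for the slow case, 0 for a proxy)
-  //   sp[4] : the enumerable object itself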
-
- // Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ bind(&loop);
- // Load the current count to r0, load the length to r1.
- __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
- __ cmp(r0, r1); // Compare to the array length.
- __ b(hs, loop_statement.break_label());
-
- // Get the current entry of the array into register r3.
- __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
- __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Get the expected map from the stack or a smi in the
- // permanent slow case into register r2.
- __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we may have to filter the key.
- Label update_each;
- __ ldr(r1, MemOperand(sp, 4 * kPointerSize));
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r4, Operand(r2));
- __ b(eq, &update_each);
-
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- __ cmp(r2, Operand(Smi::FromInt(0)));
- __ b(eq, &update_each);
-
- // Convert the entry to a string or (smi) 0 if it isn't a property
- // any more. If the property has been removed while iterating, we
- // just skip it.
- __ push(r1); // Enumerable.
- __ push(r3); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ mov(r3, Operand(r0), SetCC);
- __ b(eq, loop_statement.continue_label());
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register r3.
- __ bind(&update_each);
- __ mov(result_register(), r3);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each());
- }
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for going to the next element by incrementing
- // the index (smi) stored on top of the stack.
- __ bind(loop_statement.continue_label());
- __ pop(r0);
- __ add(r0, r0, Operand(Smi::FromInt(1)));
- __ push(r0);
-
- EmitBackEdgeBookkeeping(stmt, &loop);
- __ b(&loop);
-
- // Remove the pointers stored on the stack.
- __ bind(loop_statement.break_label());
- __ Drop(5);
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(&exit);
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literal cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
- __ mov(r0, Operand(info));
- __ push(r0);
- __ CallStub(&stub);
- } else {
- __ mov(r0, Operand(info));
- __ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex
- : Heap::kFalseValueRootIndex);
- __ Push(cp, r0, r1);
- __ CallRuntime(Runtime::kNewClosure, 3);
- }
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
- TypeofState typeof_state,
- Label* slow) {
- Register current = cp;
- Register next = r1;
- Register temp = r2;
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- }
- // Load next context in chain.
- __ ldr(next, ContextOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- if (!current.is(next)) {
- __ Move(next, current);
- }
- __ bind(&loop);
- // Terminate at native context.
- __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- __ cmp(temp, ip);
- __ b(eq, &fast);
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- // Load next context in chain.
- __ ldr(next, ContextOperand(next, Context::PREVIOUS_INDEX));
- __ b(&loop);
- __ bind(&fast);
- }
-
- __ ldr(r0, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(r2, Operand(var->name()));
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- ASSERT(var->IsContextSlot());
- Register context = cp;
- Register next = r3;
- Register temp = r4;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
- }
- __ ldr(next, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- context = next;
- }
- }
- // Check that last extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return a cp-based operand (the write barrier cannot be allowed to
- // destroy the cp register).
- return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
- __ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (local->mode() == CONST) {
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- } else { // LET || CONST_HARMONY
- __ b(ne, done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
- }
- __ jmp(done);
- }
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
- // Record position before possible IC call.
- SetSourcePosition(proxy->position());
- Variable* var = proxy->var();
-
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
- switch (var->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in r2 and the global
- // object (receiver) in r0.
- __ ldr(r0, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(r2, Operand(var->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- context()->Plug(r0);
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP always holds.
- ASSERT(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else {
- // Check that we always have valid source position.
- ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
- ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- GetVar(r0, var);
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- Label done;
- __ b(ne, &done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&done);
- } else {
- // Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- }
- context()->Plug(r0);
- break;
- }
- }
- context()->Plug(var);
- break;
- }
-
- case Variable::LOOKUP: {
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
- __ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
- __ mov(r1, Operand(var->name()));
- __ Push(cp, r1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ bind(&done);
- context()->Plug(r0);
- }
- }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // r5 = materialized value (RegExp literal)
- // r4 = JS function, literals array
- // r3 = literal index
- // r2 = RegExp pattern
- // r1 = RegExp flags
- // r0 = RegExp literal clone
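- // The literals array caches the materialized regexp for this literal
- // site, so the runtime materialization below runs at most once; every
- // evaluation then clones the cached regexp, since a JSRegExp carries
- // mutable state such as lastIndex.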
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ ldr(r5, FieldMemOperand(r4, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r5, ip);
- __ b(ne, &materialized);
-
- // Create regexp literal using runtime function.
- // Result will be in r0.
- __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r2, Operand(expr->pattern()));
- __ mov(r1, Operand(expr->flags()));
- __ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(r5, r0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(r5);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ push(r0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(r5);
-
- __ bind(&allocated);
- // After this, registers are used as follows:
- // r0: Newly allocated regexp.
- // r5: Materialized regexp.
- // r2: temp.
- __ CopyFields(r0, r5, r2.bit(), size / kPointerSize);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
- if (expression == NULL) {
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ push(r1);
- } else {
- VisitForStackValue(expression);
- }
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(constant_properties));
- int flags = expr->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= expr->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- __ mov(r0, Operand(Smi::FromInt(flags)));
- int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
- __ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- } else {
- FastCloneShallowObjectStub stub(properties_count);
- __ CallStub(&stub);
- }
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in r0.
- bool result_saved = false;
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
- AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(r0); // Save result on stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsInternalizedString()) {
- if (property->emit_store()) {
- VisitForAccumulatorValue(value);
- __ mov(r2, Operand(key->handle()));
- __ ldr(r1, MemOperand(sp));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
- } else {
- VisitForEffect(value);
- }
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- // Duplicate receiver on stack.
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- __ mov(r0, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ push(r0);
- __ CallRuntime(Runtime::kSetProperty, 4);
- } else {
- __ Drop(3);
- }
- break;
- case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
- break;
- case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
- break;
- }
- }
-
- // Emit code to define accessors, using only a single call to the runtime for
- // each pair of corresponding getters and setters.
- for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end();
- ++it) {
- __ ldr(r0, MemOperand(sp)); // Duplicate receiver.
- __ push(r0);
- VisitForStackValue(it->first);
- EmitAccessor(it->second->getter);
- EmitAccessor(it->second->setter);
- __ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
- }
-
- if (expr->has_function()) {
- ASSERT(result_saved);
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(r0);
- }
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
- Handle<FixedArray> constant_elements = expr->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(constant_elements->get(1)));
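- // constant_elements is a pair: element 0 holds the ElementsKind as a
- // smi, element 1 the FixedArrayBase with the constant element values.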
-
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(constant_elements));
- __ Push(r3, r2, r1);
- if (has_fast_elements && constant_elements_values->map() ==
- isolate()->heap()->fixed_cow_array_map()) {
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
- length);
- __ CallStub(&stub);
- __ IncrementCounter(
- isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
- } else if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
-
- if (has_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
-
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- __ CallStub(&stub);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(r0);
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- if (IsFastObjectElementsKind(constant_elements_kind)) {
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ ldr(r6, MemOperand(sp)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
- __ str(result_register(), FieldMemOperand(r1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(r1, offset, result_register(), r2,
- kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ ldr(r1, MemOperand(sp)); // Copy of array literal.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
- __ mov(r3, Operand(Smi::FromInt(i)));
- __ mov(r4, Operand(Smi::FromInt(expr->literal_index())));
- StoreArrayLiteralElementStub stub;
- __ CallStub(&stub);
- }
-
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(r0);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in the accumulator.
- VisitForAccumulatorValue(property->obj());
- __ push(result_register());
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case KEYED_PROPERTY:
- if (expr->is_compound()) {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- __ ldr(r1, MemOperand(sp, 0));
- __ push(r0);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- break;
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ push(r0); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
- SetSourcePosition(expr->position() + 1);
- AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- mode,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ mov(r2, Operand(key->handle()));
- // Call load IC. It has arguments receiver and property name in r0 and r2.
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- // Call keyed load IC. It has arguments key and receiver in r0 and r1.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left_expr,
- Expression* right_expr) {
- Label done, smi_case, stub_call;
-
- Register scratch1 = r2;
- Register scratch2 = r3;
-
- // Get the arguments.
- Register left = r1;
- Register right = r0;
- __ pop(left);
-
- // Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
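- // With kSmiTag == 0, (left | right) has a clear tag bit only when
- // both operands are smis, so a single test covers both.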
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(scratch1, &smi_case);
-
- __ bind(&stub_call);
- BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done);
-
- __ bind(&smi_case);
- // Smi case. This code works the same way as the smi-smi case in the type
- // recording binary operation stub, see
- // BinaryOpStub::GenerateSmiSmiOperation for comments.
- switch (op) {
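- // Note: the unconditional branches at the top of the shift cases below
- // send SAR/SHL/SHR straight to the stub; the instructions after them
- // are currently unreachable.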
- case Token::SAR:
- __ b(&stub_call);
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- __ bic(right, right, Operand(kSmiTagMask));
- break;
- case Token::SHL: {
- __ b(&stub_call);
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
- __ b(mi, &stub_call);
- __ SmiTag(right, scratch1);
- break;
- }
- case Token::SHR: {
- __ b(&stub_call);
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &stub_call);
- __ SmiTag(right, scratch1);
- break;
- }
- case Token::ADD:
- __ add(scratch1, left, Operand(right), SetCC);
- __ b(vs, &stub_call);
- __ mov(right, scratch1);
- break;
- case Token::SUB:
- __ sub(scratch1, left, Operand(right), SetCC);
- __ b(vs, &stub_call);
- __ mov(right, scratch1);
- break;
- case Token::MUL: {
- __ SmiUntag(ip, right);
- __ smull(scratch1, scratch2, left, ip);
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &stub_call);
- __ cmp(scratch1, Operand::Zero());
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ b(ne, &done);
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ b(mi, &stub_call);
- break;
- }
- case Token::BIT_OR:
- __ orr(right, left, Operand(right));
- break;
- case Token::BIT_AND:
- __ and_(right, left, Operand(right));
- break;
- case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(r0);
-}
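-
-
-// A host-side sketch (illustrative only, not emitted code) of the smi
-// fast-path checks above, assuming 32-bit smis with kSmiTag == 0 and a
-// one-bit tag, so tagged == (untagged << 1):
-static inline bool TaggedAddOverflows(int32_t left, int32_t right) {
- // (a << 1) + (b << 1) == (a + b) << 1, so tagged operands add
- // directly; signed 32-bit overflow (the vs branch) is the only failure.
- int32_t sum = static_cast<int32_t>(
- static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
- return ((left ^ sum) & (right ^ sum)) < 0;
-}
-
-static inline bool TaggedMulFits(int32_t left, int32_t right, int32_t* out) {
- // smull multiplies tagged left by untagged right, so the 64-bit
- // product is already tagged; it fits iff the high word equals the
- // sign extension of the low word. A zero product is a valid smi only
- // when the operand signs would give +0, not -0.
- int64_t product = static_cast<int64_t>(left) * (right >> 1);
- if (static_cast<int32_t>(product) != product) return false;
- if (product == 0 && static_cast<int64_t>(left) + right < 0) return false;
- *out = static_cast<int32_t>(product);
- return true;
-}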
-
-
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
- __ pop(r1);
- BinaryOpStub stub(op, mode);
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
- break;
- }
- case NAMED_PROPERTY: {
- __ push(r0); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- __ mov(r1, r0);
- __ pop(r0); // Restore value.
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- case KEYED_PROPERTY: {
- __ push(r0); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(r1, r0);
- __ pop(r2);
- __ pop(r0); // Restore value.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- }
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- if (var->IsUnallocated()) {
- // Global var, const, or let.
- __ mov(r2, Operand(var->name()));
- __ ldr(r1, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
- } else if (op == Token::INIT_CONST) {
- // Const initializers need a write barrier.
- ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ ldr(r1, StackOperand(var));
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
- __ b(ne, &skip);
- __ str(result_register(), StackOperand(var));
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(r0);
- __ mov(r0, Operand(var->name()));
- __ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
- // Non-initializing assignment to let variable needs a write barrier.
- if (var->IsLookupSlot()) {
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, r1);
- __ ldr(r3, location);
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ b(ne, &assign);
- __ mov(r3, Operand(var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- // Perform the assignment.
- __ bind(&assign);
- __ str(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(r3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- }
-
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
- // Assignment to var or initializing assignment to let/const
- // in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
- MemOperand location = VarOperand(var, r1);
- if (generate_debug_code_ && op == Token::INIT_LET) {
- // Check for an uninitialized let binding.
- __ ldr(r2, location);
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ Check(eq, "Let binding re-initialization.");
- }
- // Perform the assignment.
- __ str(r0, location);
- if (var->IsContextSlot()) {
- __ mov(r3, r0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- }
- // Non-initializing assignments to consts are ignored.
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- __ pop(r1);
-
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ pop(r1); // Key.
- __ pop(r2);
-
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- EmitNamedPropertyLoad(expr);
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(r0);
- } else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ pop(r1);
- EmitKeyedPropertyLoad(expr);
- context()->Plug(r0);
- }
-}
-
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- // All calls must have a predictable size in full-codegen code to ensure that
- // the debugger can patch them correctly.
- __ Call(code, rmode, ast_id, al, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- __ mov(r2, Operand(name));
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(r1);
- __ push(r0);
- __ push(r1);
-
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0); // Drop the key still on the stack.
-}
-
-
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
-
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ mov(r2, Operand(cell));
-
- CallFunctionStub stub(arg_count, flags);
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
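- // The caller has already pushed a copy of the function to call; the
- // five pushes below complete the six arguments consumed by the
- // Runtime::kResolvePossiblyDirectEval call at the end.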
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- } else {
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- }
- __ push(r1);
-
- // Push the receiver of the enclosing function.
- int receiver_offset = 2 + info_->scope()->num_parameters();
- __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
- __ push(r1);
- // Push the language mode.
- __ mov(r1, Operand(Smi::FromInt(language_mode())));
- __ push(r1);
-
- // Push the start position of the scope the call resides in.
- __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
- __ push(r1);
-
- // Push the qml mode flag.
- __ mov(r1, Operand(Smi::FromInt(is_qml_mode())));
- __ push(r1);
-
- // Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
-
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
- // call. Then we call the resolved function using the given
- // arguments.
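- // After resolution the stack holds [function, receiver, args...],
- // exactly the layout an ordinary CallFunctionStub call expects.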
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ push(r2); // Reserved receiver slot.
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(r1);
- EmitResolvePossiblyDirectEval(arg_count);
-
- // The runtime call returns a pair of values in r0 (function) and
- // r1 (receiver). Touch up the stack with the right values.
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ str(r1, MemOperand(sp, arg_count * kPointerSize));
- }
-
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ ldr(r0, proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ push(r0);
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- // Call to a lookup slot (dynamically introduced variable).
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
- }
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in r0)
- // and the object holding it (returned in r1).
- __ push(context_register());
- __ mov(r2, Operand(proxy->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ Push(r0, r1); // Function, receiver.
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ b(&call);
- __ bind(&done);
- // Push function.
- __ push(r0);
- // The receiver is implicitly the global receiver. Indicate this
- // by passing the hole to the call function stub.
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
- __ push(r1);
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found
- // by LoadContextSlot. That object could be the hole if the
- // receiver is implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
- RelocInfo::CODE_TARGET);
- } else {
- EmitKeyedCallWithIC(expr, property->key());
- }
- } else {
- // Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- }
- // Load global receiver object.
- __ ldr(r1, GlobalObjectOperand());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ push(r1);
- // Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function and argument count into r1 and r0.
- __ mov(r0, Operand(arg_count));
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-
- // Record call targets in unoptimized code.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(r2, Operand(cell));
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ tst(r0, Operand(kSmiTagMask));
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
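- // A non-negative smi has both the tag bit (bit 0) and the sign bit
- // (bit 31) clear, so one combined mask test decides the split.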
- __ tst(r0, Operand(kSmiTagMask | 0x80000000));
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, if_true);
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- __ b(ne, if_false);
- __ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(lt, if_false);
- __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ge, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ne, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(r0);
-
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
- __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, if_true);
-
- // Check for fast case object. Generate false result for slow case object.
- __ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r2, ip);
- __ b(eq, if_false);
-
- // Look for valueOf name in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(r3, r1);
- __ cmp(r3, Operand::Zero());
- __ b(eq, &done);
-
- __ LoadInstanceDescriptors(r1, r4);
- // r4: descriptor array.
- // r3: valid entries in the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ mov(ip, Operand(DescriptorArray::kDescriptorSize));
- __ mul(r3, r3, ip);
- // Calculate location of the first key name.
- __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
- // Calculate the end of the descriptor array.
- __ mov(r2, r4);
- __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
-
- // Loop through all the keys in the descriptor array. If one of these is the
- // string "valueOf" the result is false.
- // The use of ip to store the valueOf string assumes that it is not otherwise
- // used in the loop below.
- __ mov(ip, Operand(FACTORY->value_of_string()));
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(r3, MemOperand(r4, 0));
- __ cmp(r3, ip);
- __ b(eq, if_false);
- __ add(r4, r4, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ cmp(r4, Operand(r2));
- __ b(ne, &loop);
-
- __ bind(&done);
- // If a valueOf property is not found on the object, check that its
- // prototype is the unmodified String prototype. If not, the result is false.
- __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
- __ JumpIfSmi(r2, if_false);
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ cmp(r2, r3);
- __ b(ne, if_false);
-
- // Set the bit in the map to indicate that the object has been checked
- // as safe for the default valueOf, and jump to the true result.
- __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ jmp(if_true);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r2, SYMBOL_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &check_frame_marker);
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(r1);
- __ cmp(r0, r1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in r1 and the formal
- // parameter count in r0.
- VisitForAccumulatorValue(args->at(0));
- __ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label exit;
- // Get the number of formal parameters.
- __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ JumpIfSmi(r0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
- // Map is now in r0.
- __ b(lt, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ b(eq, &function);
-
- __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ b(eq, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
- __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(ne, &non_function_constructor);
-
- // r0 now contains the constructor function. Grab the
- // instance class name from there.
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
- __ b(&done);
-
- // Functions have class 'Function'.
- __ bind(&function);
- __ LoadRoot(r0, Heap::kfunction_class_stringRootIndex);
- __ jmp(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ bind(&non_function_constructor);
- __ LoadRoot(r0, Heap::kObject_stringRootIndex);
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ LoadRoot(r0, Heap::kNullValueRootIndex);
-
- // All done.
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
-
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(r4, Operand(r0));
-
- __ bind(&heapnumber_allocated);
-
- // Convert 32 random bits in r0 to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
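- // In host C++ terms (a sketch, assuming IEEE-754 doubles):
- //   uint64_t word = (uint64_t(0x41300000) << 32) | random_bits;
- //   double d; memcpy(&d, &word, sizeof d);  // == 2^20 + bits * 2^-32
- //   d -= 1048576.0;                         // == bits * 2^-32, in [0, 1)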
- if (CpuFeatures::IsSupported(VFP2)) {
- __ PrepareCallCFunction(1, r0);
- __ ldr(r0,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- CpuFeatures::Scope scope(VFP2);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand::Zero());
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
- __ sub(r0, r4, Operand(kHeapObjectTag));
- __ vstr(d7, r0, HeapNumber::kValueOffset);
- __ mov(r0, r4);
- } else {
- __ PrepareCallCFunction(2, r0);
- __ ldr(r1,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ mov(r0, Operand(r4));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
- }
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(r0, &done);
- // If the object is not a value type, return the object.
- __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
- __ b(ne, &done);
- __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label runtime, done, not_date_object;
- Register object = r0;
- Register result = r0;
- Register scratch0 = r9;
- Register scratch1 = r1;
-
- __ JumpIfSmi(object, &not_date_object);
- __ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
- __ b(ne, &not_date_object);
-
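- // Field 0 is the date's time value and is always read directly.
- // Other cached fields are valid only while the object's cache stamp
- // matches the isolate's date cache stamp; on a mismatch we fall back
- // to the C function that recomputes and recaches the field.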
- if (index->value() == 0) {
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch1, Operand(stamp));
- __ ldr(scratch1, MemOperand(scratch1));
- __ ldr(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch1, scratch0);
- __ b(ne, &runtime);
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch1);
- __ mov(r1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ jmp(&done);
- }
-
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(r2);
- __ pop(r1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(r2);
- __ pop(r1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- if (CpuFeatures::IsSupported(VFP2)) {
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(r1); // r0 = value. r1 = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(r1, &done);
-
- // If the object is not a value type, return the value.
- __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
- __ b(ne, &done);
-
- // Store the value.
- __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mov(r2, r0);
- __ RecordWriteField(
- r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
-
- NumberToStringStub stub;
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(r0, r1);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(r1);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = r1;
- Register index = r0;
- Register result = r3;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result, Heap::kNanValueRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Load the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = r1;
- Register index = r0;
- Register scratch = r3;
- Register result = r0;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ mov(result, Operand(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(ne, &runtime);
-
- // InvokeFunction requires the function in r1. Move it in there.
- __ mov(r1, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(r0);
- __ CallRuntime(Runtime::kCall, args->length());
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- context()->Plug(r0);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = r0;
- Register cache = r1;
- __ ldr(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
- __ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ ldr(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done, not_found;
- // The cache is a FixedArray of (key, value) pairs, with a "finger"
- // remembering the most recently used entry.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
- // r2 now holds finger offset as a smi.
- __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // r3 now points to the start of fixed array elements.
- __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
- // Note side effect of PreIndex: r3 now points to the key of the pair.
- __ cmp(key, r2);
- __ b(ne, &not_found);
-
- __ ldr(r0, MemOperand(r3, kPointerSize));
- __ b(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = r0;
- Register left = r1;
- Register tmp = r2;
- Register tmp2 = r3;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
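- // Two regexps are equivalent if they are the same object, or if both
- // are JSRegExps that share a map and the same data array (which holds
- // the pattern and flags).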
- __ cmp(left, Operand(right));
- __ b(eq, &ok);
- // Fail if either is a non-HeapObject.
- __ and_(tmp, left, Operand(right));
- __ JumpIfSmi(tmp, &fail);
- __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
- __ b(ne, &fail);
- __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ cmp(tmp, Operand(tmp2));
- __ b(ne, &fail);
- __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
- __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
- __ cmp(tmp, tmp2);
- __ b(eq, &ok);
- __ bind(&fail);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&ok);
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- __ bind(&done);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
- __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(r0);
-
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
- __ IndexFromHash(r0, r0);
-
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
- one_char_separator_loop_entry, long_separator_loop;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(0));
-
- // All aliases of the same register have disjoint lifetimes.
- Register array = r0;
- Register elements = no_reg; // Will be r0.
- Register result = no_reg; // Will be r0.
- Register separator = r1;
- Register array_length = r2;
- Register result_pos = no_reg; // Will be r2.
- Register string_length = r3;
- Register string = r4;
- Register element = r5;
- Register elements_end = r6;
- Register scratch1 = r7;
- Register scratch2 = r9;
-
- // Separator operand is on the stack.
- __ pop(separator);
-
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
- __ b(ne, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch1, scratch2, &bailout);
-
- // If the array has length zero, return the empty string.
- __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length, SetCC);
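- // SmiUntag with SetCC sets the condition flags, so the Z flag tells us
- // whether the length is zero without a separate comparison.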
- __ b(ne, &non_trivial_array);
- __ LoadRoot(r0, Heap::kempty_stringRootIndex);
- __ b(&done);
-
- __ bind(&non_trivial_array);
-
- // Get the FixedArray containing array's elements.
- elements = array;
- __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
- array = no_reg; // End of array's live range.
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ mov(string_length, Operand::Zero());
- __ add(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- // Loop condition: while (element < elements_end).
- // Live values in registers:
- // elements: Fixed array of strings.
- // array_length: Length of the fixed array of strings (not smi)
- // separator: Separator string
- // string_length: Accumulated sum of string lengths (smi).
- // element: Current array element.
- // elements_end: Array end.
- if (generate_debug_code_) {
- __ cmp(array_length, Operand::Zero());
- __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
- }
- __ bind(&loop);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ JumpIfSmi(string, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ add(string_length, string_length, Operand(scratch1), SetCC);
- __ b(vs, &bailout);
- __ cmp(element, elements_end);
- __ b(lt, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, Operand(1));
- __ b(ne, &not_size_one_array);
- __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ b(&done);
-
- __ bind(&not_size_one_array);
-
- // Live values in registers:
- // separator: Separator string
- // array_length: Length of the array.
- // string_length: Sum of string lengths (smi).
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat ASCII string.
- __ JumpIfSmi(separator, &bailout);
- __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
-
- // Add (separator length times array_length) - separator length to the
- // string_length to get the length of the result string. array_length is
- // not a smi but the other values are, so the result is a smi.
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ sub(string_length, string_length, Operand(scratch1));
- __ smull(scratch2, ip, array_length, scratch1);
- // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
- // zero.
- __ cmp(ip, Operand::Zero());
- __ b(ne, &bailout);
- __ tst(scratch2, Operand(0x80000000));
- __ b(ne, &bailout);
- __ add(string_length, string_length, Operand(scratch2), SetCC);
- __ b(vs, &bailout);
- __ SmiUntag(string_length);
-
- // Get first element in the array to free up the elements register to be used
- // for the result.
- __ add(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- result = elements; // End of live range for elements.
- elements = no_reg;
- // Live values in registers:
- // element: First array element
- // separator: Separator string
- // string_length: Length of result string (not smi)
- // array_length: Length of the array.
- __ AllocateAsciiString(result,
- string_length,
- scratch1,
- scratch2,
- elements_end,
- &bailout);
- // Prepare for looping. Set up elements_end to end of the array. Set
- // result_pos to the position of the result where to write the first
- // character.
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- result_pos = array_length; // End of live range for array_length.
- array_length = no_reg;
- __ add(result_pos,
- result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // Check the length of the separator.
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(1)));
- __ b(eq, &one_char_separator);
- __ b(gt, &long_separator);
-
- // Empty separator case
- __ bind(&empty_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
-
- // Copy next array element to the result.
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
- __ cmp(element, elements_end);
- __ b(lt, &empty_separator_loop); // End while (element < elements_end).
- ASSERT(result.is(r0));
- __ b(&done);
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its ASCII character value.
- __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
- // Jump into the loop after the code that copies the separator, so the
- // first element is not preceded by a separator.
- __ jmp(&one_char_separator_loop_entry);
-
- __ bind(&one_char_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Single separator ASCII char (in lower byte).
-
- // Copy the separator character to the result.
- __ strb(separator, MemOperand(result_pos, 1, PostIndex));
-
- // Copy next array element to the result.
- __ bind(&one_char_separator_loop_entry);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
- __ cmp(element, elements_end);
- __ b(lt, &one_char_separator_loop); // End while (element < elements_end).
- ASSERT(result.is(r0));
- __ b(&done);
-
- // Long separator case (separator is more than one character). Entry is at the
- // label long_separator below.
- __ bind(&long_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Separator string.
-
- // Copy the separator to the result.
- __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- separator,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
-
- __ bind(&long_separator);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
- __ cmp(element, elements_end);
- __ b(lt, &long_separator_loop); // End while (element < elements_end).
- ASSERT(result.is(r0));
- __ b(&done);
-
- __ bind(&bailout);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ ldr(r0, GlobalObjectOperand());
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
- __ push(r0);
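- // The builtins object is left on the stack as the receiver of the call.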
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- __ mov(r2, Operand(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- }
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* property = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
-
- if (property != NULL) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag)));
- __ push(r1);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(r0);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
- if (var->IsUnallocated()) {
- __ ldr(r2, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
- __ Push(r2, r1, r0);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(r0);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(var->is_this());
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ mov(r2, Operand(var->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(r0);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
-
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(Heap::kUndefinedValueRootIndex);
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else if (context()->IsTest()) {
- const TestContext* test = TestContext::cast(context());
- // The labels are swapped for the recursive call.
- VisitForControl(expr->expression(),
- test->false_label(),
- test->true_label(),
- test->fall_through());
- context()->Plug(test->true_label(), test->false_label());
- } else {
- // We handle value contexts explicitly rather than simply visiting
- // for control and plugging the control flow into the context,
- // because we need to prepare a pair of extra administrative AST ids
- // for the optimizing compiler.
- ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
- Label materialize_true, materialize_false, done;
- VisitForControl(expr->expression(),
- &materialize_false,
- &materialize_true,
- &materialize_true);
- __ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- if (context()->IsStackValue()) __ push(r0);
- __ jmp(&done);
- __ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- if (context()->IsStackValue()) __ push(r0);
- __ bind(&done);
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- { StackValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ CallRuntime(Runtime::kTypeof, 1);
- context()->Plug(r0);
- break;
- }
-
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ JumpIfSmi(result_register(), &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- // TODO(svenpanne): Allowing format strings in Comment would be nice here...
- Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
- // UnaryOpStub expects the argument to be in the
- // accumulator register r0.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
-
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ mov(ip, Operand(Smi::FromInt(0)));
- __ push(ip);
- }
- if (assign_type == NAMED_PROPERTY) {
- // Put the object both on the stack and in the accumulator.
- VisitForAccumulatorValue(prop->obj());
- __ push(r0);
- EmitNamedPropertyLoad(prop);
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ ldr(r1, MemOperand(sp, 0));
- __ push(r0);
- EmitKeyedPropertyLoad(prop);
- }
- }
-
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load has a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
- }
-
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
- __ JumpIfSmi(r0, &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(r0);
- break;
- case NAMED_PROPERTY:
- __ str(r0, MemOperand(sp, kPointerSize));
- break;
- case KEYED_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
- }
- }
- }
-
- // Inline smi case if we are in a loop.
- Label stub_call, done;
- JumpPatchSite patch_site(masm_);
-
- int count_value = expr->op() == Token::INC ? 1 : -1;
- if (ShouldInlineSmiCase(expr->op())) {
- __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
- __ b(vs, &stub_call);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(r0, &done);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
- }
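- // The generic BinaryOpStub takes its left operand in r1 and its right
- // operand in r0, so the count operation becomes a generic Token::ADD of
- // the old value and +/-1.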
- __ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(count_value)));
-
- // Record position before stub call.
- SetSourcePosition(expr->position());
-
- BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
-
- // Store the value returned in r0.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(r0);
- }
- // For all contexts except EffectContext we have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
- }
- break;
- case NAMED_PROPERTY: {
- __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
- __ pop(r1);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r0);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ pop(r1); // Key.
- __ pop(r2); // Receiver.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r0);
- }
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
- __ ldr(r0, proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(r2, Operand(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallIC(ic);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(r0);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ mov(r0, Operand(proxy->name()));
- __ Push(cp, r0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(r0);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Expression* sub_expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(sub_expr);
- }
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- if (check->Equals(isolate()->heap()->number_string())) {
- __ JumpIfSmi(r0, if_true);
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
- __ JumpIfSmi(r0, if_false);
- // Check for undetectable objects => false.
- __ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
- __ b(ge, if_false);
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
- __ CompareRoot(r0, Heap::kTrueValueRootIndex);
- __ b(eq, if_true);
- __ CompareRoot(r0, Heap::kFalseValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
- } else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
- __ CompareRoot(r0, Heap::kNullValueRootIndex);
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(eq, if_true);
- __ JumpIfSmi(r0, if_false);
- // Check for undetectable objects => true.
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(ne, if_true, if_false, fall_through);
-
- } else if (check->Equals(isolate()->heap()->function_string())) {
- __ JumpIfSmi(r0, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
- __ b(eq, if_true);
- __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
- Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
- __ JumpIfSmi(r0, if_false);
- if (!FLAG_harmony_typeof) {
- __ CompareRoot(r0, Heap::kNullValueRootIndex);
- __ b(eq, if_true);
- }
- if (FLAG_harmony_symbols) {
- __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
- __ b(eq, if_true);
- }
- // Check for JS objects => true.
- __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(lt, if_false);
- __ CompareInstanceType(r0, r1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(gt, if_false);
- // Check for undetectable objects => false.
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
- } else {
- if (if_false != fall_through) __ jmp(if_false);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
-
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Token::Value op = expr->op();
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- Split(eq, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- // The stub returns 0 for true.
- __ tst(r0, r0);
- Split(eq, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cond = CompareIC::ComputeCondition(op);
- __ pop(r1);
-
- bool inline_smi_code = ShouldInlineSmiCase(op);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
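- // ORing the operands leaves the (zero) smi tag bit clear only when
- // both operands are smis, so a single test covers both values.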
- __ orr(r2, r0, Operand(r1));
- patch_site.EmitJumpIfNotSmi(r2, &slow_case);
- __ cmp(r1, r0);
- Split(cond, if_true, if_false, NULL);
- __ bind(&slow_case);
- }
-
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
- patch_site.EmitPatchInfo();
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ cmp(r0, Operand::Zero());
- Split(cond, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(r1, nil_value);
- __ cmp(r0, r1);
- if (expr->op() == Token::EQ_STRICT) {
- Split(eq, if_true, if_false, fall_through);
- } else {
- Heap::RootListIndex other_nil_value = nil == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- __ b(eq, if_true);
- __ LoadRoot(r1, other_nil_value);
- __ cmp(r0, r1);
- __ b(eq, if_true);
- __ JumpIfSmi(r0, if_false);
- // It can be an undetectable object.
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
- __ cmp(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(r0);
-}
-
-
-Register FullCodeGenerator::result_register() {
- return r0;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return cp;
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ str(value, MemOperand(fp, frame_offset));
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ldr(dst, ContextOperand(cp, context_index));
-}
-
-
-void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_global_scope() ||
- declaration_scope->is_module_scope()) {
- // Contexts nested in the native context have a canonical empty function
- // as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ mov(ip, Operand(Smi::FromInt(0)));
- } else if (declaration_scope->is_eval_scope()) {
- // Contexts created by a call to eval have the same closure as the
- // context calling eval, not the anonymous closure containing the eval
- // code. Fetch it from the context.
- __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
- } else {
- ASSERT(declaration_scope->is_function_scope());
- __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
- __ push(ip);
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(r1));
- // Store result register while executing finally block.
- __ push(result_register());
- // Cook the return address in the link register onto the stack as a
- // smi-encoded Code* delta.
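- // A smi delta is safe for the GC to see on the stack and remains valid
- // even if the code object is moved by a collection.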
- __ sub(r1, lr, Operand(masm_->CodeObject()));
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r1, r1, Operand(r1)); // Convert to smi.
-
- // Store the cooked return address while executing the finally block.
- __ push(r1);
-
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(ip, Operand(pending_message_obj));
- __ ldr(r1, MemOperand(ip));
- __ push(r1);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(ip, Operand(has_pending_message));
- __ ldr(r1, MemOperand(ip));
- __ SmiTag(r1);
- __ push(r1);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(ip, Operand(pending_message_script));
- __ ldr(r1, MemOperand(ip));
- __ push(r1);
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(r1));
- // Restore pending message from stack.
- __ pop(r1);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(ip, Operand(pending_message_script));
- __ str(r1, MemOperand(ip));
-
- __ pop(r1);
- __ SmiUntag(r1);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(ip, Operand(has_pending_message));
- __ str(r1, MemOperand(ip));
-
- __ pop(r1);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(ip, Operand(pending_message_obj));
- __ str(r1, MemOperand(ip));
-
- // Restore result register from stack.
- __ pop(r1);
-
- // Uncook return address and return.
- __ pop(result_register());
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
- __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
- __ add(pc, r1, Operand(masm_->CodeObject()));
-}
-
-
-#undef __
-
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ ldr(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ PopTryHandler();
- __ bl(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/ic-arm.cc b/src/3rdparty/v8/src/arm/ic-arm.cc
deleted file mode 100644
index e8d0fab..0000000
--- a/src/3rdparty/v8/src/arm/ic-arm.cc
+++ /dev/null
@@ -1,1685 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "assembler-arm.h"
-#include "code-stubs.h"
-#include "codegen.h"
-#include "disasm.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ b(eq, global_object);
- __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(eq, global_object);
- __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register elements,
- Register t0,
- Register t1,
- Label* miss) {
- // Register usage:
- // receiver: holds the receiver on entry and is unchanged.
- // elements: holds the property dictionary on fall through.
- // Scratch registers:
- // t0: used to hold the receiver map.
- // t1: used to hold the receiver instance type, receiver bit mask and
- // elements map.
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the receiver is a valid JS object.
- __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, miss);
-
- // If this assert fails, we have to check upper bound too.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
- GenerateGlobalInstanceTypeCheck(masm, t1, miss);
-
- // Check that the receiver does not require access checks and has no
- // named interceptor.
- __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasNamedInterceptor)));
- __ b(ne, miss);
-
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(t1, ip);
- __ b(ne, miss);
-}
-
-
-// Helper function used from LoadIC/CallIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
- // done.
-// result: Register for the result. It is only updated if a jump to the miss
- // label is not done. Can be the same as elements or name, clobbering
-// one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register result,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry check that the value is a normal
- // property.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
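- // Dictionary entries are (key, value, details) triples, so the details
- // word lies two pointers past the key that scratch2 points at.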
- __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
- __ b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- __ ldr(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
- // done.
-// value: The value to store.
-// The two scratch registers need to be different from elements, name and
- // value.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register name,
- Register value,
- Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss,
- &done,
- elements,
- name,
- scratch1,
- scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
- __ b(ne, miss);
-
- // Store the value at the masked, scaled index and return.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
- __ str(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ mov(scratch1, value);
- __ RecordWrite(
- elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- Register scratch,
- int interceptor_bit,
- Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ b(ne, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects works as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(JS_OBJECT_TYPE));
- __ b(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch1,
- Register scratch2,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - holds the elements of the receiver on exit.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // scratch1 - used to hold elements map and elements length.
- // Holds the elements map if not_fast_array branch is taken.
- //
- // scratch2 - used to hold the loaded value.
-
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch1, ip);
- __ b(ne, not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
- // Check that the key (index) is within bounds.
- __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch1));
- __ b(hs, out_of_range);
- // Fast case: Do the load.
- __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
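- // Shifting the smi key left by (kPointerSizeLog2 - kSmiTagSize) untags
- // it and scales it to a byte offset in one operation.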
- __ ldr(scratch2,
- MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, ip);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ b(eq, out_of_range);
- __ mov(result, scratch2);
-}
-
-
-// Checks whether a key is an array index string or an internalized string.
-// Falls through if a key is an internalized string.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_internalized) {
- // The key is not a smi.
- // Is it a string?
- __ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE);
- __ b(ge, not_internalized);
-
- // Is the string an array index, with cached numeric value?
- __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
- __ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
- __ b(eq, index_string);
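- // When those mask bits are clear, the hash field caches the numeric
- // index itself; IndexFromHash extracts it at the index_string label.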
-
- // Is the string internalized?
- // map: key map
- __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ tst(hash, Operand(kIsInternalizedMask));
- __ b(eq, not_internalized);
-}
-
-
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- r1 : receiver
- // -- r2 : name
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, the cache is keyed on the map of the prototype
- // object of the corresponding JSValue, so that is the map we need
- // to probe with.
- //
- // Check for number.
- __ JumpIfSmi(r1, &number);
- __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
- __ b(ne, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, r1);
- __ b(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
- __ b(hs, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, r1);
- __ b(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &boolean);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, r1);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss,
- Register scratch) {
- // r1: function
-
- // Check that the value isn't a smi.
- __ JumpIfSmi(r1, miss);
-
- // Check that the value is a JSFunction.
- __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
-
- // r0: elements
- // Search the dictionary - put result in register r1.
- GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);
-
- GenerateFunctionTailCall(masm, argc, &miss, r4);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(isolate->counters()->call_miss(), 1, r3, r4);
- } else {
- __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, r3, r4);
- }
-
- // Get the receiver of the function from the stack.
- __ ldr(r3, MemOperand(sp, argc * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ Push(r3, r2);
-
- // Call the entry.
- __ mov(r0, Operand(2));
- __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Move result to r1 and leave the internal frame.
- __ mov(r1, Operand(r0));
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for a regular CallIC, not a KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
- __ JumpIfSmi(r2, &invoke);
- __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
- __ b(eq, &global);
- __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
- __ b(ne, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
- __ str(r2, MemOperand(sp, argc * kPointerSize));
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(r1,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver of the function from the stack into r1.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(r2, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. Control also reaches this point
- // from below, where a numeric string key is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, r0, r3);
-
- __ bind(&do_call);
- // receiver in r1 is not used after this point.
- // r2: key
- // r1: function
- GenerateFunctionTailCall(masm, argc, &slow_call, r0);
-
- __ bind(&check_number_dictionary);
- // r2: key
- // r3: elements map
- // r4: elements
- // Check whether the elements array is a number dictionary.
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow_load);
- __ mov(r0, Operand(r2, ASR, kSmiTagSize));
- // r0: untagged index
- __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(r2); // save the key
- __ Push(r1, r2); // pass the receiver and the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(r2); // restore the key
- }
- __ mov(r1, r0);
- __ jmp(&do_call);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call);
-
- // The key is known to be internalized.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &lookup_monomorphic_cache);
-
- GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, r0, r3);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, r0, r3);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or an access check,
- // - the key is neither a smi nor an internalized string,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3);
- GenerateMiss(masm, argc);
-
- __ bind(&index_string);
- __ IndexFromHash(r3, r2);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Check if the name is a string.
- Label miss;
- __ JumpIfSmi(r2, &miss);
- __ IsObjectJSStringType(r2, r0, &miss);
-
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
-// Defined in ic.cc.
-Object* LoadIC_Miss(Arguments args);
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::LOAD_IC, MONOMORPHIC, Code::HANDLER_FRAGMENT);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r0, r2, r3, r4, r5, r6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
-
- // r1: elements
- GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
- __ Ret();
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
-
- __ mov(r3, r0);
- __ Push(r3, r2);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the map check
- // later, we do not need to check for interceptors or whether it
- // requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
- __ b(lt, slow_case);
-
- // Check that the key is a positive smi.
- __ tst(key, Operand(0x80000001));
- __ b(ne, slow_case);
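- // The mask 0x80000001 tests bit 0 (the smi tag, which must be zero)
- // and bit 31 (the sign bit, which must be zero for a non-negative
- // smi), so a single tst rejects heap objects and negative keys.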
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if the element is in the range of mapped arguments. If not,
- // jump to the unmapped lookup with the parameter map in scratch1.
- __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
- __ cmp(key, Operand(scratch2));
- __ b(cs, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kOffset =
- FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
- __ mov(scratch3, Operand(kPointerSize >> 1));
- __ mul(scratch3, key, scratch3);
- __ add(scratch3, scratch3, Operand(kOffset));
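- // Because the key is still a tagged smi (index << 1), multiplying it
- // by kPointerSize >> 1 gives index * kPointerSize without a separate
- // untagging step; kOffset then skips the FixedArray header plus the
- // two leading parameter-map slots (context and arguments backing
- // store).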
-
- __ ldr(scratch2, MemOperand(scratch1, scratch3));
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, scratch3);
- __ b(eq, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ mov(scratch3, Operand(kPointerSize >> 1));
- __ mul(scratch3, scratch2, scratch3);
- __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
- return MemOperand(scratch1, scratch3);
-}
-
-
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // The element is in the arguments backing store, which is referenced
- // by the second element of the parameter map. The parameter_map
- // register must hold the parameter map of the arguments object on
- // entry and is overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
- DONT_DO_SMI_CHECK);
- __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch));
- __ b(cs, slow_case);
- __ mov(scratch, Operand(kPointerSize >> 1));
- __ mul(scratch, key, scratch);
- __ add(scratch,
- scratch,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- return MemOperand(backing_store, scratch);
-}
-
-
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, notin;
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, &notin, &slow);
- __ ldr(r0, mapped_location);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r2.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow);
- __ ldr(r2, unmapped_location);
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, r3);
- __ b(eq, &slow);
- __ mov(r0, r2);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
- __ str(r0, mapped_location);
- __ add(r6, r3, r5);
- __ mov(r9, r0);
- __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
- __ str(r0, unmapped_location);
- __ add(r6, r3, r4);
- __ mov(r9, r0);
- __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label slow, notin;
- // Load receiver.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
- MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, r1, r2, r3, r4, r5, &notin, &slow);
- __ ldr(r1, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow, r3);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in r3.
- MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, r2, r3, r4, &slow);
- __ ldr(r1, unmapped_location);
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ cmp(r1, r3);
- __ b(eq, &slow);
- GenerateFunctionTailCall(masm, argc, &slow, r3);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
-Object* KeyedLoadIC_Miss(Arguments args);
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Isolate* isolate = masm->isolate();
-
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
-
- __ Push(r1, r0);
-
- // Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = r0;
- Register receiver = r1;
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. Control also reaches this point
- // from below, where a numeric string key is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(r2, r3, &check_number_dictionary);
-
- GenerateFastArrayLoad(
- masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r2, r3);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
-
- // Check whether the elements array is a number dictionary.
- // r0: key
- // r3: elements map
- // r4: elements
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow);
- __ mov(r2, Operand(r0, ASR, kSmiTagSize));
- __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
- __ Ret();
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
- 1, r2, r3);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
- __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &probe_dictionary);
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
- __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
- __ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
- int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
- __ And(r3, r3, Operand(mask));
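- // The probe index mixes the receiver's map pointer with the string's
- // hash field. The combined mask bounds the index to the cache capacity
- // and (via kHashMask) keeps it aligned to a bucket boundary, so the
- // loop below can scan kEntriesPerBucket consecutive (map, string)
- // pairs; that pairing is also why the entry address is scaled by two
- // pointers per index.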
-
- // Load the key (consisting of map and internalized string) from the
- // cache and check for a match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(isolate);
-
- __ mov(r4, Operand(cache_keys));
- __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- // Load map and move r4 to next entry.
- __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
- __ cmp(r2, r5);
- __ b(ne, &try_next_entry);
- __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load string
- __ cmp(r0, r5);
- __ b(eq, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- // Last entry: Load map and move r4 to string.
- __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
- __ cmp(r2, r5);
- __ b(ne, &slow);
- __ ldr(r5, MemOperand(r4));
- __ cmp(r0, r5);
- __ b(ne, &slow);
-
- // Get field offset.
- // r0 : key
- // r1 : receiver
- // r2 : receiver's map
- // r3 : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- __ mov(r4, Operand(cache_field_offsets));
- if (i != 0) {
- __ add(r3, r3, Operand(i));
- }
- __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
- __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
- __ sub(r5, r5, r6, SetCC);
- __ b(ge, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ add(r6, r6, r5); // Index from start of object.
- __ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
- __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, r2, r3);
- __ Ret();
-
- // Load property array property.
- __ bind(&property_array_property);
- __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
- __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2));
- __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
- 1, r2, r3);
- __ Ret();
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // r1: receiver
- // r0: key
- // r3: elements
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
- // Load the property to r0.
- GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
- __ IncrementCounter(
- isolate->counters()->keyed_load_generic_symbol(), 1, r2, r3);
- __ Ret();
-
- __ bind(&index_string);
- __ IndexFromHash(r3, key);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key (index)
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- Register receiver = r1;
- Register index = r0;
- Register scratch = r3;
- Register result = r0;
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r1, &slow);
-
- // Check that the key is an array index, that is, a Uint32.
- __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
- __ b(ne, &slow);
-
- // Get the map of the receiver.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
- __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
- __ b(ne, &slow);
-
- // Everything is fine, call runtime.
- __ Push(r1, r0); // Receiver, key.
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
- masm->isolate()),
- 2,
- 1);
-
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
-
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r2 : receiver
- // -- r3 : target map
- // -- lr : return address
- // -----------------------------------
- // Must return the modified receiver in r0.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
- __ mov(r0, r2);
- __ Ret();
- __ bind(&fail);
- }
-
- __ push(r2);
- __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
- MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- r2 : receiver
- // -- r3 : target map
- // -- lr : return address
- // -----------------------------------
- // Must return the modified receiver in r0.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
- __ mov(r0, r2);
- __ Ret();
- __ bind(&fail);
- }
-
- __ push(r2);
- __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- // Push receiver, key and value for runtime call.
- __ Push(r2, r1, r0);
-
- __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length,
- Register value,
- Register key,
- Register receiver,
- Register receiver_map,
- Register elements_map,
- Register elements) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
-
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- Register scratch_value = r4;
- Register address = r5;
- if (check_map == kCheckMap) {
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, fast_double);
- }
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
-
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- // It's irrelevant whether the array is smi-only when writing a smi.
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(address));
- __ Ret();
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value, MemOperand(address));
- // Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements,
- address,
- scratch_value,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
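- // RecordWrite informs the GC of the new elements->value edge: the
- // store address is added to the remembered set (EMIT_REMEMBERED_SET),
- // and the smi check can be omitted because the smi path already
- // returned above, so 'value' is known to be a heap object here.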
- __ Ret();
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- __ b(ne, slow);
- }
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value,
- key,
- elements, // Overwritten.
- r3, // Scratch regs...
- r4,
- r5,
- r6,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Ret();
-
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- receiver_map,
- r4,
- slow);
- ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- r4,
- slow);
- ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
- slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
- // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- receiver_map,
- r4,
- slow);
- ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- Register receiver_map = r3;
- Register elements_map = r6;
- Register elements = r7; // Elements array of the receiver.
- // r4 and r5 are used as general scratch registers.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &slow);
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map of the object.
- __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &slow);
- // Check if the object is a JS array or not.
- __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
- __ cmp(r4, Operand(JS_ARRAY_TYPE));
- __ b(eq, &array);
- // Check that the object is some kind of JSObject.
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
- __ b(lt, &slow);
-
- // Object case: Check key against length in the elements array.
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(lo, &fast_object);
-
- // Slow case: jump to the runtime.
- __ bind(&slow);
- // Entry registers are intact.
- // r0: value.
- // r1: key.
- // r2: receiver.
- GenerateRuntimeSetProperty(masm, strict_mode);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // Condition code from comparing key and array length is still available.
- __ b(ne, &slow); // Only support writing to array[array.length].
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(hs, &slow);
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ b(ne, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(hs, &extra);
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength,
- value, key, receiver, receiver_map,
- elements_map, elements);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
-
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, r1, r2, r3, r4, r5, r6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- __ Push(r1, r2, r0);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
-
- GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(),
- 1, r4, r5);
- __ Ret();
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
-
- __ Push(r1, r2, r0);
-
- __ mov(r1, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- __ mov(r0, Operand(Smi::FromInt(strict_mode)));
- __ Push(r1, r0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address cmp_instruction_address =
- Assembler::return_address_from_call_start(address);
-
- // If the instruction following the call is not a cmp rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(cmp_instruction_address);
- return Assembler::IsCmpImmediate(instr);
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- Address cmp_instruction_address =
- Assembler::return_address_from_call_start(address);
-
- // If the instruction following the call is not a cmp rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(cmp_instruction_address);
- if (!Assembler::IsCmpImmediate(instr)) {
- return;
- }
-
- // Compute the delta to the start of the map check instruction from
- // the cmp instruction that follows the call.
- int delta = Assembler::GetCmpImmediateRawImmediate(instr);
- delta +=
- Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
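- // The delta is encoded in two pieces: the cmp's 12-bit raw immediate
- // plus the code of its register operand scaled by kOff12Mask, which
- // extends the range beyond what the immediate alone can express.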
- // If the delta is 0, the instruction is cmp r0, #0, which also
- // signals that nothing was inlined.
- if (delta == 0) {
- return;
- }
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
- address, cmp_instruction_address, delta);
- }
-#endif
-
- Address patch_address =
- cmp_instruction_address - delta * Instruction::kInstrSize;
- Instr instr_at_patch = Assembler::instr_at(patch_address);
- Instr branch_instr =
- Assembler::instr_at(patch_address + Instruction::kInstrSize);
- // This is patching a conditional "jump if not smi/jump if smi" site.
- // Enabling by changing from
- // cmp rx, rx
- // b eq/ne, <target>
- // to
- // tst rx, #kSmiTagMask
- // b ne/eq, <target>
- // and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
- Register reg = Assembler::GetRn(instr_at_patch);
- if (check == ENABLE_INLINED_SMI_CHECK) {
- ASSERT(Assembler::IsCmpRegister(instr_at_patch));
- ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
- Assembler::GetRm(instr_at_patch).code());
- patcher.masm()->tst(reg, Operand(kSmiTagMask));
- } else {
- ASSERT(check == DISABLE_INLINED_SMI_CHECK);
- ASSERT(Assembler::IsTstImmediate(instr_at_patch));
- patcher.masm()->cmp(reg, reg);
- }
- ASSERT(Assembler::IsBranch(branch_instr));
- if (Assembler::GetCondition(branch_instr) == eq) {
- patcher.EmitCondition(ne);
- } else {
- ASSERT(Assembler::GetCondition(branch_instr) == ne);
- patcher.EmitCondition(eq);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/lithium-arm.cc b/src/3rdparty/v8/src/arm/lithium-arm.cc
deleted file mode 100644
index 3385b43..0000000
--- a/src/3rdparty/v8/src/arm/lithium-arm.cc
+++ /dev/null
@@ -1,2515 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "lithium-allocator-inl.h"
-#include "arm/lithium-arm.h"
-#include "arm/lithium-codegen-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
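-// For every entry in LITHIUM_CONCRETE_INSTRUCTION_LIST the macro above
-// expands to a small forwarding stub; for example, the Goto entry
-// produces:
-//
-//   void LGoto::CompileToNative(LCodeGen* generator) {
-//     generator->DoGoto(this);
-//   }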
-
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Input operands must use a fixed register, a use-at-start policy, or
- // a non-register policy.
- ASSERT(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "shl-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d",
- *hydrogen()->class_name(),
- true_block_id(),
- false_block_id());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
- true_block_id(), false_block_id());
-}
-
-
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
-}
-
-
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[r2] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ASSERT(hydrogen()->property_cell()->value()->IsSmi());
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(hydrogen()->property_cell()->value())->value());
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
- stream->Add(" length ");
- length()->PrintTo(stream);
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
- } else {
- stream->Add("] <- ");
- }
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
- // Doubles need two slots, so skip one extra for a double-width slot.
- if (is_double) spill_slot_count_++;
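- // e.g. starting from a count of 0, a double bumps the count to 1, the
- // return below yields index 1, and the count ends at 2: two slots are
- // consumed and the returned index names the second of the pair.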
- return spill_slot_count_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-void LChunkBuilder::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- DoubleRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr, int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(
- LTemplateInstruction<1, I, T>* instr, Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
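- // Remember the HSimulate that follows an instruction with observable
- // side effects; the lazy deoptimization environment is attached when
- // that simulate is processed.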
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_.IsNone());
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
- }
-
- // If the instruction does not have observable side effects, lazy
- // deoptimization after the call will try to deoptimize to the point
- // before the call. Thus we still need to attach an environment to
- // this call even if the call sequence cannot deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
- if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
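- // JS shift counts are taken modulo 32 (ECMA-262), which matches
- // ARM's shift-amount range, hence the mask with 0x1f.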
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseRegisterAtStart(right_value);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
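- // For example, with x = -1 the logical shift x >>> 0 yields
- // 4294967295, which does not fit in an int32, so we must deopt
- // unless every use truncates the result to int32.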
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
- }
- }
-
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
- HValue* left = instr->left();
- HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- LOperand* left_operand = UseFixed(left, r1);
- LOperand* right_operand = UseFixed(right, r0);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- ASSERT(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
- // Only copy the environment if it is used again later.
- if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment; it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
-
- if (instr != NULL) {
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- instr->set_hydrogen_value(current);
- chunk_->AddInstruction(instr, current_block_);
- }
- current_instruction_ = old_current;
-}
-
-
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
- if (hydrogen_env == NULL) return NULL;
-
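- // Environments are built outermost-first so that the argument index
- // accumulates correctly across inlined frames.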
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
- }
-
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
-
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment.
- Representation rep = value->representation();
- HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
- return AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LCmpMapAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
- LOperand* value = UseRegister(instr->value());
- return DefineAsRegister(new(zone()) LArgumentsLength(value));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LInstanceOf* result =
- new(zone()) LInstanceOf(UseFixed(instr->left(), r0),
- UseFixed(instr->right(), r1));
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), r0),
- FixedTemp(r4));
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegisterAtStart(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), r1);
- LOperand* receiver = UseFixed(instr->receiver(), r0);
- LOperand* length = UseFixed(instr->length(), r2);
- LOperand* elements = UseFixed(instr->elements(), r3);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
- LOperand* argument = Use(instr->argument());
- return new(zone()) LPushArgument(argument);
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- // If there is a non-return use, the context must be allocated in a register.
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- return DefineAsRegister(new(zone()) LContext);
- }
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context,
- instr->qml_global()));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
-}
-
-
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
- return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
- LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
- } else if (op == kMathPowHalf) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LOperand* temp = FixedTemp(d3);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- return DefineFixedDouble(result, d2);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
-
- LOperand* temp = (op == kMathRound) ? FixedTemp(d3) : NULL;
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathFloor:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathSqrt:
- return DefineAsRegister(result);
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- argument_count_ -= instr->argument_count();
- LOperand* key = UseFixed(instr->key(), r2);
- return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- LCallGlobal* result = new(zone()) LCallGlobal(instr->qml_global());
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* constructor = UseFixed(instr->constructor(), r1);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- LOperand* constructor = UseFixed(instr->constructor(), r1);
- argument_count_ -= instr->argument_count();
- LCallNewArray* result = new(zone()) LCallNewArray(constructor);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), r0),
- instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineAsRegister(new(zone()) LBitI(left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LBitNotI(value));
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- // TODO(1042): The fixed register allocation is needed because we
- // call TypeRecordingBinaryOpStub from the generated code, which
- // requires registers r0 and r1 to be used. We should remove that
- // when we provide a native implementation.
- LOperand* dividend = UseFixed(instr->left(), r0);
- LOperand* divisor = UseFixed(instr->right(), r1);
- return AssignEnvironment(AssignPointerMap(
- DefineFixed(new(zone()) LDivI(dividend, divisor), r0)));
- } else {
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
- uint32_t divisor_abs = abs(divisor);
- // Dividing by 0, 1, and powers of 2 is easy.
- // Note that IsPowerOf2(0) returns true.
- ASSERT(IsPowerOf2(0) == true);
- if (IsPowerOf2(divisor_abs)) return true;
-
- // We have magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer’s Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
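- // For example, 12 = 3 * 2^2: the power-of-2 factor is stripped below
- // and the magic-number table is consulted for the remaining 3.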
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
-
- return false;
-}
-
-
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (CpuFeatures::IsSupported(SUDIV)) {
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
- }
- }
-
- if (divisor->IsConstant() && HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- int32_t int32_val = constant_val->Integer32Value();
- if (LChunkBuilder::HasMagicNumberForDivisor(int32_val) ||
- CpuFeatures::IsSupported(SUDIV)) {
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
- ? UseRegister(right)
- : UseOrConstant(right);
- LOperand* remainder = TempRegister();
- ASSERT(CpuFeatures::IsSupported(SUDIV) ||
- (right->IsConstant() &&
- HConstant::cast(right)->HasInteger32Value() &&
- HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value())));
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LModI* mod;
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
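- // A power-of-2 divisor allows the modulus to be computed with a bit
- // mask rather than a full division (plus sign handling for negative
- // dividends).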
- LOperand* value = UseRegisterAtStart(instr->left());
- mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
- } else {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- mod = new(zone()) LModI(dividend,
- divisor,
- TempRegister(),
- FixedTemp(d10),
- FixedTemp(d11));
- }
-
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero)) {
- return AssignEnvironment(DefineAsRegister(mod));
- } else {
- return DefineAsRegister(mod);
- }
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
- } else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use a fixed result register for the call.
- // TODO(fschneider): Allow arbitrary registers as inputs.
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = UseFixedDouble(instr->right(), d2);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, d1), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left;
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
- (instr->CheckFlag(HValue::kCanOverflow) ||
- !right->IsConstantOperand())) {
- left = UseRegister(instr->LeastConstantOperand());
- temp = TempRegister();
- } else {
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- }
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- AssignEnvironment(mul);
- }
- return DefineAsRegister(mul);
-
- } else if (instr->representation().IsDouble()) {
- if (instr->UseCount() == 1 && (instr->uses().value()->IsAdd() ||
- instr->uses().value()->IsSub())) {
- HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
-
- if (use->IsAdd() && instr == use->left()) {
- // This mul is the lhs of an add. The add and mul will be folded into a
- // multiply-add in DoAdd.
- return NULL;
- }
- if (instr == use->right() && use->IsAdd() && !use->left()->IsMul()) {
- // This mul is the rhs of an add, where the lhs is not another mul.
- // The add and mul will be folded into a multiply-add in DoAdd.
- return NULL;
- }
- if (instr == use->right() && use->IsSub()) {
- // This mul is the rhs of a sub. The sub and mul will be folded into a
- // multiply-sub in DoSub.
- return NULL;
- }
- }
-
- return DoArithmeticD(Token::MUL, instr);
- } else {
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- if (instr->left()->IsConstant()) {
- // If lhs is constant, do reverse subtraction instead.
- return DoRSub(instr);
- }
-
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineAsRegister(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- if (instr->right()->IsMul()) {
- return DoMultiplySub(instr->left(), HMul::cast(instr->right()));
- }
-
- return DoArithmeticD(Token::SUB, instr);
- } else {
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- // Note: The lhs of the subtraction becomes the rhs of the
- // reverse-subtraction.
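- // For example, 5 - x is emitted as rsb(result, x, Operand(5)),
- // which computes 5 - x in a single instruction.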
- LOperand* left = UseRegisterAtStart(instr->right());
- LOperand* right = UseOrConstantAtStart(instr->left());
- LRSubI* rsb = new(zone()) LRSubI(left, right);
- LInstruction* result = DefineAsRegister(rsb);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
- LOperand* multiplier_op = UseRegisterAtStart(mul->left());
- LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
- LOperand* addend_op = UseRegisterAtStart(addend);
- return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
- multiplicand_op));
-}
-
-
-LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
- LOperand* minuend_op = UseRegisterAtStart(minuend);
- LOperand* multiplier_op = UseRegisterAtStart(mul->left());
- LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
-
- return DefineSameAsFirst(new(zone()) LMultiplySubD(minuend_op,
- multiplier_op,
- multiplicand_op));
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineAsRegister(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- if (instr->left()->IsMul()) {
- return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
- }
-
- if (instr->right()->IsMul()) {
- ASSERT(!instr->left()->IsMul());
- return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
- }
-
- return DoArithmeticD(Token::ADD, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
- } else {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return DefineAsRegister(new(zone()) LMathMinMax(left, right));
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use a fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), d1);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), d2) :
- UseFixed(instr->right(), r2);
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, d3),
- instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), r0);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, d7), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LCmpT* result = new(zone()) LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseRegisterOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
- } else {
- ASSERT(r.IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpConstantEqAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsObjectAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasInstanceTypeAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
- return new(zone()) LClassOfTestAndBranch(value, TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), r0);
- LDateField* result =
- new(zone()) LDateField(object, FixedTemp(r1), instr->index());
- return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- LOperand* value = UseRegister(instr->value());
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
- HInductionVariableAnnotation* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = UseRegister(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* value = UseFixed(instr->value(), r0);
- return MarkAsCall(new(zone()) LThrow(value), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* res = NULL;
- if (instr->value()->type().IsSmi()) {
- res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
- } else {
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
- : NULL;
- LOperand* temp3 = FixedTemp(d11);
- res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
- temp1,
- temp2,
- temp3));
- res = AssignEnvironment(res);
- }
- return res;
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
-
- // Make sure that the temp and result_temp registers are
- // different.
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
- Define(result, result_temp);
- return AssignPointerMap(result);
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
- LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
- return AssignEnvironment(DefineAsRegister(res));
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegisterAtStart(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
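- // On 32-bit targets a smi is the value shifted left by one, so
- // tagging cannot overflow when the value fits in 31 bits.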
- return DefineAsRegister(new(zone()) LSmiTag(value));
- } else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- }
- } else {
- ASSERT(to.IsDouble());
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value())));
- } else {
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(Use(instr->value())));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(Define(result, temp1));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = new(zone()) LCheckMaps(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(d11)));
- } else if (input_rep.IsInteger32()) {
- return DefineAsRegister(new(zone()) LClampIToUint8(reg));
- } else {
- ASSERT(input_rep.IsTagged());
- // The register allocator doesn't (yet) support allocation of double
- // temps. Reserve d11 explicitly.
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11));
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new(zone()) LReturn(UseFixed(instr->value(), r0));
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- return DefineAsRegister(new(zone()) LConstantD);
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), r0);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* value = UseRegister(instr->value());
- // Use a temp to check the value in the cell when a hole check is
- // performed.
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
- : new(zone()) LStoreGlobalCell(value, NULL);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), r1);
- LOperand* value = UseFixed(instr->value(), r0);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- if (instr->NeedsWriteBarrier()) {
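- // The write barrier clobbers its register arguments, so both inputs
- // must be writable temps.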
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), r0);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, r0), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), r0);
- LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
-
- if (!instr->is_external()) {
- LOperand* obj = NULL;
- if (instr->representation().IsDouble()) {
- obj = UseTempRegister(instr->elements());
- } else {
- ASSERT(instr->representation().IsTagged());
- obj = UseRegisterAtStart(instr->elements());
- }
- result = new(zone()) LLoadKeyed(obj, key);
- } else {
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- // float->double conversion on non-VFP2 requires an extra scratch
- // register. For convenience, just mark the elements register as
- // "UseTemp" so that it can be used as a temp during the
- // float->double conversion once it is no longer needed after the
- // float load.
- bool needs_temp =
- !CpuFeatures::IsSupported(VFP2) &&
- (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
- LOperand* external_pointer = needs_temp
- ? UseTempRegister(instr->elements())
- : UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
- }
-
- DefineAsRegister(result);
- // An unsigned int array load might overflow and cause a deopt;
- // make sure it has an environment. A uint32 value above kMaxInt
- // cannot be represented as an int32 result.
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- return can_deoptimize ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), r1);
- LOperand* key = UseFixed(instr->key(), r0);
-
- LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), r0);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
-
- if (!instr->is_external()) {
- ASSERT(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* key = NULL;
- LOperand* val = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- } else {
- ASSERT(instr->value()->representation().IsTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = needs_write_barrier ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- }
-
- return new(zone()) LStoreKeyed(object, key, val);
- }
-
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* obj = UseFixed(instr->object(), r2);
- LOperand* key = UseFixed(instr->key(), r1);
- LOperand* val = UseFixed(instr->value(), r0);
-
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
-
- return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* new_map_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
- return DefineSameAsFirst(result);
- } else if (FLAG_compiled_transitions) {
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL, NULL);
- return AssignPointerMap(result);
- } else {
- LOperand* object = UseFixed(instr->object(), r0);
- LOperand* fixed_object_reg = FixedTemp(r2);
- LOperand* new_map_reg = FixedTemp(r3);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object,
- new_map_reg,
- fixed_object_reg);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = instr->is_in_object()
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
-
- // We need a temporary register for write barrier of the map field.
- LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* obj = UseFixed(instr->object(), r1);
- LOperand* val = UseFixed(instr->value(), r0);
-
- LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), r0),
- instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LAllocateObject* result =
- new(zone()) LAllocateObject(TempRegister(), TempRegister());
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* size = UseTempRegister(instr->size());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseFixed(instr->object(), r0);
- LOperand* key = UseFixed(instr->key(), r1);
- LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- ASSERT(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- ASSERT(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
- Register reg = descriptor->register_params_[instr->index()];
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), r0);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), r0));
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
-
-  // If there is an instruction with a pending deoptimization environment,
-  // create a lazy bailout instruction to capture that environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LInstruction* result = new(zone()) LLazyBailout;
- result = AssignEnvironment(result);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
- instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = BailoutId::None();
- return result;
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
- } else {
- ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
- }
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* key = UseRegisterAtStart(instr->key());
- LOperand* object = UseRegisterAtStart(instr->object());
- LIn* result = new(zone()) LIn(key, object);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* object = UseFixed(instr->enumerable(), r0);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
- return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseRegister(instr->index());
- return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
-}
-
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/arm/lithium-arm.h b/src/3rdparty/v8/src/arm/lithium-arm.h
deleted file mode 100644
index c654400..0000000
--- a/src/3rdparty/v8/src/arm/lithium-arm.h
+++ /dev/null
@@ -1,2742 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_LITHIUM_ARM_H_
-#define V8_ARM_LITHIUM_ARM_H_
-
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
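-// These lists are X-macros: V is applied once per instruction name, so the
-// opcode enum, the type predicates and the casts defined below stay in sync
-// with the list.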
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(AllocateObject) \
- V(Allocate) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(ArrayLiteral) \
- V(BitI) \
- V(BitNotI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallStub) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckNonSmi) \
- V(CheckMaps) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CmpConstantEqAndBranch) \
- V(CmpIDAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantI) \
- V(ConstantT) \
- V(Context) \
- V(DeclareGlobals) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(DivI) \
- V(DoubleToI) \
- V(DummyUse) \
- V(ElementsKind) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(In) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(Uint32ToDouble) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
- V(IsObjectAndBranch) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyed) \
- V(LoadKeyedGeneric) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(MapEnumLength) \
- V(MathExp) \
- V(MathFloorOfDiv) \
- V(MathMinMax) \
- V(ModI) \
- V(MulI) \
- V(MultiplyAddD) \
- V(MultiplySubD) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(Random) \
- V(RegExpLiteral) \
- V(Return) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyed) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(StringLength) \
- V(SubI) \
- V(RSubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(Throw) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop)
-
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
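-// For example, DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") gives LAddI a
-// kAddI opcode, the "add-i" mnemonic and a checked LAddI::cast().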
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction: public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
- virtual ~LInstruction() { }
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
- void MarkAsCall() { is_call_ = true; }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return is_call_; }
- bool ClobbersRegisters() const { return is_call_; }
- bool ClobbersDoubleRegisters() const { return is_call_; }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- private:
- // Iterator support.
- friend class InputIterator;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- bool is_call_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
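-// For example, LAddI below is an LTemplateInstruction<1, 2, 0>: one result,
-// two inputs (left and right) and no temps.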
-template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
-
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
-};
-
-
-class LGap: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
- static LGap* cast(LInstruction* instr) {
- ASSERT(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap: public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
-
- int block_id() const { return block_id_; }
-
- private:
- int block_id_;
-};
-
-
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
-
-  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-};
-
-
-class LLabel: public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- virtual void PrintDataTo(StringStream* stream);
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
-};
-
-
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
- virtual bool IsControl() const { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-};
-
-
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-};
-
-
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LModI: public LTemplateInstruction<1, 2, 3> {
- public:
-  // Used when the right-hand side is a constant power of 2.
- LModI(LOperand* left,
- LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = NULL;
- temps_[1] = NULL;
- temps_[2] = NULL;
- }
-
- // Used for the standard case.
- LModI(LOperand* left,
- LOperand* right,
- LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivI: public LTemplateInstruction<1, 2, 0> {
- public:
- LDivI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-};
-
-
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
- public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI: public LTemplateInstruction<1, 2, 1> {
- public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-// Instruction for computing multiplier * multiplicand + addend.
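-// On ARM this typically lowers to a vmla.f64 multiply-accumulate.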
-class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplyAddD(LOperand* addend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = addend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* addend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
-};
-
-
-// Instruction for computing minuend - multiplier * multiplicand.
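-// On ARM this typically lowers to a vmls.f64 multiply-subtract.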
-class LMultiplySubD: public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplySubD(LOperand* minuend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = minuend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* minuend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
-};
-
-
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
- public:
- LUnaryMathOperation(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
-};
-
-
-class LMathExp: public LTemplateInstruction<1, 1, 3> {
- public:
- LMathExp(LOperand* value,
- LOperand* double_temp,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = double_temp;
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* double_temp() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
-};
-
-
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
- }
-
- LOperand* left() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
-class LIsNilAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsNilAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
- public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
- public:
- LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
- "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
- public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
- public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
- lazy_deopt_env_ = env;
- }
-
- private:
- LEnvironment* lazy_deopt_env_;
-};
-
-
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI: public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LRSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LRSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value() const { return hydrogen()->handle(); }
-};
-
-
-class LBranch: public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
- public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
-};
-
-
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
-class LDateField: public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- Smi* index() const { return index_; }
-
-  DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
-  DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
- public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
- inputs_[0] = string;
- inputs_[1] = index;
- inputs_[2] = value;
- }
-
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
-class LThrow: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower: public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LRandom: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRandom(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- Token::Value op() const { return op_; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LReturn(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
-                               "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
- }
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- bool is_external() const {
- return hydrogen()->is_external();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedGeneric(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-};
-
-
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
- public:
- LStoreGlobalCell(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
- }
-
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalObject(LOperand* context, bool qml_global) {
- inputs_[0] = context;
- qml_global_ = qml_global;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
- LOperand* context() { return inputs_[0]; }
- bool qml_global() { return qml_global_; }
-
- private:
- bool qml_global_;
-};
-
-
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
-
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> function() { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
-};
-
-
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
- }
-
- LOperand* key() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- explicit LCallGlobal(bool qml_global) : qml_global_(qml_global) {}
-
- virtual void PrintDataTo(StringStream* stream);
-
-  Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-
-  bool qml_global() { return qml_global_; }
-
- private:
- bool qml_global_;
-};
-
-
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return hydrogen()->target(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
- }
-
- LOperand* constructor() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNewArray(LOperand* constructor) {
- inputs_[0] = constructor;
- }
-
- LOperand* constructor() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
-};
-
-
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagU(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-// Sometimes truncating conversion from a double to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
- public:
- LDoubleToI(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
- public:
- LTaggedToI(LOperand* value,
- LOperand* temp,
- LOperand* temp2,
- LOperand* temp3) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- temps_[2] = temp3;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* temp3() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-};
-
-
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
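-
-
-// For reference, a minimal sketch (not taken from this file) of the smi
-// encoding that LSmiTag and LSmiUntag implement on this 32-bit port,
-// assuming the standard one-bit tag (kSmiTagSize == 1):
-//
-//   int32_t tagged = value << kSmiTagSize;  // smi-tag: low bit becomes 0
-//   int32_t value = tagged >> kSmiTagSize;  // smi-untag
-//
-// When needs_check() is true the input may be a heap object rather than
-// a smi, so the untag sequence must test the low bit first and
-// deoptimize on a non-smi.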
-
-
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
-};
-
-
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- bool is_external() const { return hydrogen()->is_external(); }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) {
- inputs_[0] = obj;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* new_map_temp,
- LOperand* fixed_object_temp) {
- inputs_[0] = object;
- temps_[0] = new_map_temp;
- temps_[1] = fixed_object_temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
- LOperand* temp() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento: public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object, LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
- }
-
- LOperand* char_code() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- LOperand* string() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckFunction(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
-};
-
-
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
- public:
- LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- ZoneList<Handle<JSObject> >* prototypes() const {
- return hydrogen()->prototypes();
- }
- ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-};
-
-
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
- public:
- LClampDToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* unclamped, LOperand* temp) {
- inputs_[0] = unclamped;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
- public:
- LAllocateObject(LOperand* temp, LOperand* temp2) {
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LAllocate: public LTemplateInstruction<1, 2, 2> {
- public:
- LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
- inputs_[1] = size;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* size() { return inputs_[1]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
-};
-
-
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
- LDeleteProperty(LOperand* object, LOperand* key) {
- inputs_[0] = object;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
- LOperand* double_register_spills_[
- DoubleRegister::kMaxNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LIn: public LTemplateInstruction<1, 2, 0> {
- public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
- inputs_[1] = object;
- }
-
- LOperand* key() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk: public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
-
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
-};
-
-
-class LChunkBuilder BASE_EMBEDDED {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
- info_(info),
- graph_(graph),
- zone_(graph->zone()),
- status_(UNUSED),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
- LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
- LInstruction* DoRSub(HSub* instr);
-
- static bool HasMagicNumberForDivisor(int32_t divisor);
- static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
-
- private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(const char* reason);
-
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- DoubleRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // An operand created by UseRegister is guaranteed to be live until the
- // end of the instruction. This means that the register allocator will
- // not reuse its register for any other operand inside the instruction.
- // An operand created by UseRegisterAtStart is guaranteed to be live only
- // at the instruction start. The register allocator is free to assign the
- // same register to some other operand used inside the instruction (i.e.
- // a temporary or the output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
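-
- // A minimal usage sketch (illustrative only; LDivI and the operand
- // names are not taken from this header):
- //
- //   LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- //     LOperand* dividend = UseRegister(instr->left()); // live to end
- //     LOperand* divisor = UseRegisterAtStart(instr->right()); // start only
- //     return DefineAsRegister(new(zone()) LDivI(dividend, divisor));
- //   }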
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in a register, stack slot, or constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- DoubleRegister reg);
- LInstruction* AssignEnvironment(LInstruction* instr);
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // By default we assume that instruction sequences generated for calls
- // cannot deoptimize eagerly, and we do not attach an environment to such
- // an instruction.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
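-
- // A typical call site looks roughly like this (a sketch; the concrete
- // instruction is illustrative):
- //
- //   LCallFunction* call = new(zone()) LCallFunction(function);
- //   return MarkAsCall(DefineFixed(call, r0), instr);
- //
- // i.e. calls define their result in r0 and get a pointer map recorded.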
-
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
-
- void VisitInstruction(HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
-
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Zone* zone_;
- Status status_;
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- int argument_count_;
- LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_LITHIUM_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc b/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
deleted file mode 100644
index f0b0e96..0000000
--- a/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
+++ /dev/null
@@ -1,6408 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "arm/lithium-codegen-arm.h"
-#include "arm/lithium-gap-resolver-arm.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-class SafepointGenerator : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
-
- virtual void BeforeCall(int call_size) const { }
-
- virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
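-
-
-// SafepointGenerator is passed as the CallWrapper of a macro-assembler
-// call so that a safepoint is recorded right after the generated call.
-// A sketch of the intended use (registers and names are illustrative):
-//
-//   SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-//   ParameterCount count(instr->arity());
-//   __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);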
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
- ASSERT(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // NONE indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::NONE);
-
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateDeoptJumpTable() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
- code->set_stack_slots(GetStackSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
- PopulateDeoptimizationData(code);
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
-}
-
-
-void LCodeGen::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
-#endif
-
- // r1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ cmp(r5, Operand::Zero());
- __ b(eq, &ok);
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
- }
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- if (info()->IsStub()) {
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- __ Push(Smi::FromInt(StackFrame::STUB));
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- } else {
- PredictableCodeSizeScope predictable_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- }
- frame_is_built_ = true;
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- if (FLAG_debug_code) {
- __ sub(sp, sp, Operand(slots * kPointerSize));
- __ push(r0);
- __ push(r1);
- __ add(r0, sp, Operand(slots * kPointerSize));
- __ mov(r1, Operand(kSlotsZapValue));
- Label loop;
- __ bind(&loop);
- __ sub(r0, r0, Operand(kPointerSize));
- __ str(r1, MemOperand(r0, 2 * kPointerSize));
- __ cmp(r0, sp);
- __ b(ne, &loop);
- __ pop(r1);
- __ pop(r0);
- } else {
- __ sub(sp, sp, Operand(slots * kPointerSize));
- }
- }
-
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
- }
-
- // Possibly allocate a local context.
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope() != NULL && scope()->is_qml_mode() &&
- scope()->is_global_scope())) {
- Comment(";;; Allocate local context");
- // Argument to NewContext is the function, which is in r1.
- __ push(r1);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both r0 and cp. It replaces the context
- // passed to us. It's saved in the stack and kept live in cp.
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
- // Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
- __ str(r0, target);
- // Update the write barrier. This clobbers r3 and r0.
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
- }
- }
- Comment(";;; End allocate local context");
- }
-
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
-
- if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. <#%d>", current_instruction_,
- instr->Mnemonic(), hydrogen->id());
- }
- } else {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- }
- }
- instr->CompileToNative(this);
- }
- }
- EnsureSpaceForLazyDeopt();
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred build frame",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(!frame_is_built_);
- ASSERT(info()->IsStub());
- frame_is_built_ = true;
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
- }
- Comment(";;; Deferred code @%d: %s.",
- code->instruction_index(),
- code->instr()->Mnemonic());
- code->Generate();
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred destroy frame",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(frame_is_built_);
- __ pop(ip);
- __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
- frame_is_built_ = false;
- }
- __ jmp(code->exit());
- }
- }
-
- // Force constant pool emission at the end of the deferred code to make
- // sure that no constant pools are emitted after it.
- masm()->CheckConstPool(true, false);
-
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeoptJumpTable() {
- // Check that the jump table is accessible from everywhere in the function
- // code, i.e. that offsets to the table can be encoded in the 24-bit signed
- // immediate of a branch instruction.
- // To simplify we consider the code size from the first instruction to the
- // end of the jump table. We also don't consider the pc load delta.
- // Each entry in the jump table generates at most seven instruction slots
- // (including one inlined 32-bit data word), hence the factor of 7 below.
- if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 7)) {
- Abort("Generated code is too large");
- }
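-
- // For scale: a signed 24-bit branch offset covers about +/-2^23
- // instructions, i.e. roughly +/-32MB of code, so the check above is a
- // conservative bound rather than a tight one.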
-
- __ RecordComment("[ Deoptimisation jump table");
- Label table_start;
- __ bind(&table_start);
- Label needs_frame_not_call;
- Label needs_frame_is_call;
- for (int i = 0; i < deopt_jump_table_.length(); i++) {
- __ bind(&deopt_jump_table_[i].label);
- Address entry = deopt_jump_table_[i].address;
- bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt;
- Deoptimizer::BailoutType type =
- is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- int id = Deoptimizer::GetDeoptimizationId(entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (deopt_jump_table_[i].needs_frame) {
- __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
- if (is_lazy_deopt) {
- if (needs_frame_is_call.is_bound()) {
- __ b(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
- __ mov(lr, Operand(pc), LeaveCC, al);
- __ mov(pc, ip);
- }
- } else {
- if (needs_frame_not_call.is_bound()) {
- __ b(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
- __ mov(pc, ip);
- }
- }
- } else {
- if (is_lazy_deopt) {
- __ mov(lr, Operand(pc), LeaveCC, al);
- __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
- } else {
- __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
- }
- }
- masm()->CheckConstPool(false, false);
- }
- __ RecordComment("]");
-
- // Force constant pool emission at the end of the deopt jump table to make
- // sure that no constant pools are emitted after it.
- masm()->CheckConstPool(true, false);
-
- // The deoptimization jump table is the last part of the instruction
- // sequence. Mark the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
- safepoints_.Emit(masm(), GetStackSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
-}
-
-
-DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
- return DwVfpRegister::FromAllocationIndex(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- ASSERT(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
- if (op->IsRegister()) {
- return ToRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
- } else if (r.IsDouble()) {
- Abort("EmitLoadRegister: Unsupported double immediate.");
- } else {
- ASSERT(r.IsTagged());
- if (literal->IsSmi()) {
- __ mov(scratch, Operand(literal));
- } else {
- __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
- }
- }
- return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
- __ ldr(scratch, ToMemOperand(op));
- return scratch;
- }
- UNREACHABLE();
- return scratch;
-}
-
-
-DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DwVfpRegister dbl_scratch) {
- if (op->IsDoubleRegister()) {
- return ToDoubleRegister(op->index());
- } else if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk_->LookupConstant(const_op);
- Handle<Object> literal = constant->handle();
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
- __ vmov(flt_scratch, ip);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
- return dbl_scratch;
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
- } else if (r.IsTagged()) {
- Abort("unsupported tagged immediate");
- }
- } else if (op->IsStackSlot() || op->IsArgument()) {
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(dbl_scratch, ToMemOperand(op));
- MemOperand mem_op = ToMemOperand(op);
- __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
- return dbl_scratch;
- }
- UNREACHABLE();
- return dbl_scratch;
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return constant->handle();
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsInteger32();
-}
-
-
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) {
- if (op->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- HConstant* constant = chunk()->LookupConstant(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(constant->HasInteger32Value());
- return Operand(constant->Integer32Value());
- } else if (r.IsDouble()) {
- Abort("ToOperand Unsupported double immediate.");
- }
- ASSERT(r.IsTagged());
- return Operand(constant->handle());
- } else if (op->IsRegister()) {
- return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
- Abort("ToOperand IsDoubleRegister unimplemented");
- return Operand::Zero();
- }
- // Stack slots not implemented, use ToMemOperand instead.
- UNREACHABLE();
- return Operand::Zero();
-}
-
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- ASSERT(!op->IsRegister());
- ASSERT(!op->IsDoubleRegister());
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()));
-}
-
-
-MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
- ASSERT(op->IsDoubleStackSlot());
- return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
-}
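-
-
-// A double stack slot occupies two consecutive words: ToMemOperand yields
-// the low word and ToHighMemOperand the word above it. A sketch of loading
-// both halves into core registers (the registers are illustrative):
-//
-//   __ ldr(r0, ToMemOperand(op));      // low 32 bits (little-endian)
-//   __ ldr(r1, ToHighMemOperand(op));  // high 32 bits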
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* pushed_arguments_index,
- int* pushed_arguments_count) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
-
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *pushed_arguments_index = -environment->parameter_count();
- *pushed_arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- pushed_arguments_index,
- pushed_arguments_count);
- bool has_closure_id = !info()->closure().is_null() &&
- *info()->closure() != *environment->closure();
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
-
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- ASSERT(translation_size == 1);
- ASSERT(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- ASSERT(translation_size == 2);
- ASSERT(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- }
-
- // Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization;
- // otherwise the actual argument values are unknown for inlined frames.
- bool arguments_known = true;
- int arguments_index = *pushed_arguments_index;
- int arguments_count = *pushed_arguments_count;
- if (environment->entry() != NULL) {
- arguments_known = environment->entry()->arguments_pushed();
- arguments_index = arguments_index < 0
- ? GetStackSlotCount() : arguments_index + arguments_count;
- arguments_count = environment->entry()->arguments_count() + 1;
- if (environment->entry()->arguments_pushed()) {
- *pushed_arguments_index = arguments_index;
- *pushed_arguments_count = arguments_count;
- }
- }
-
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- arguments_known,
- arguments_index,
- arguments_count);
- }
- }
-
- AddToTranslation(translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- }
-}
-
-
-void LCodeGen::AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed by the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(
- arguments_known, arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
- if (is_tagged) {
- translation->StoreStackSlot(op->index());
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
- } else {
- translation->StoreInt32StackSlot(op->index());
- }
- } else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- DoubleRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- TargetAddressStorageMode storage_mode) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- TargetAddressStorageMode storage_mode) {
- ASSERT(instr != NULL);
- // Block constant pool emission to ensure that the nop indicating no
- // inlined smi code ends up in the correct position.
- Assembler::BlockConstPoolScope block_const_pool(masm());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr) {
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- ASSERT(pointers != NULL);
- RecordPosition(pointers->position());
-
- __ CallRuntime(function, num_arguments);
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr) {
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- ASSERT(info()->IsOptimizing() || info()->IsStub());
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort("bailout was not prepared");
- return;
- }
-
- ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
- if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- return;
- }
-
- if (FLAG_trap_on_deopt) {
- __ stop("trap_on_deopt", cc);
- }
-
- ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
- if (cc == al && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- }
- } else {
- // We often have several deopts to the same entry, so reuse the last
- // jump entry if this is the case.
- if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry) ||
- (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
- (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
- JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
- deopt_jump_table_.Add(table_entry, zone());
- }
- __ b(cc, &deopt_jump_table_.last().label);
- }
-}
-
-
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
- maps.Add(map, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
-
- Handle<ByteArray> translations = translations_.CreateByteArray();
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
- }
-
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- ASSERT(expected_safepoint_kind_ == kind);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
- if (kind & Safepoint::kWithRegisters) {
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp, zone());
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegistersAndDoubles(
- LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(
- pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
-}
-
-
-void LCodeGen::RecordPosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- __ ldr(r0, MemOperand(sp, 0));
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
-
- if (divisor < 0) divisor = -divisor;
-
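- // Note: for a power-of-two divisor d, x % d is x & (|d| - 1) for
- // x >= 0 and -((-x) & (|d| - 1)) for x < 0, matching truncating
- // division; e.g. -7 % 8 == -7 and 7 % -8 == 7.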
- Label positive_dividend, done;
- __ cmp(dividend, Operand::Zero());
- __ b(pl, &positive_dividend);
- __ rsb(result, dividend, Operand::Zero());
- __ and_(result, result, Operand(divisor - 1), SetCC);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
- }
- __ rsb(result, result, Operand::Zero());
- __ b(&done);
- __ bind(&positive_dividend);
- __ and_(result, dividend, Operand(divisor - 1));
- __ bind(&done);
- return;
- }
-
- // These registers hold untagged 32-bit values.
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
- Register result = ToRegister(instr->result());
- Label done;
-
- if (CpuFeatures::IsSupported(SUDIV)) {
- CpuFeatures::Scope scope(SUDIV);
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
-
- // Check for (kMinInt % -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
- DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // For r3 = r1 % r2, we can use the following ARM code:
- // sdiv r3, r1, r2
- // mls r3, r3, r2, r1
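- // (mls r3, r3, r2, r1 computes r3 = r1 - r3 * r2, i.e. the remainder
- // of the truncating division, which has the sign of the dividend.)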
-
- __ sdiv(result, left, right);
- __ mls(result, result, right, left);
- __ cmp(result, Operand::Zero());
- __ b(ne, &done);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(lt, instr->environment());
- }
- } else {
- Register scratch = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DwVfpRegister dividend = ToDoubleRegister(instr->temp2());
- DwVfpRegister divisor = ToDoubleRegister(instr->temp3());
- DwVfpRegister quotient = double_scratch0();
-
- ASSERT(!dividend.is(divisor));
- ASSERT(!dividend.is(quotient));
- ASSERT(!divisor.is(quotient));
- ASSERT(!scratch.is(left));
- ASSERT(!scratch.is(right));
- ASSERT(!scratch.is(result));
-
- Label vfp_modulo, both_positive, right_negative;
-
- CpuFeatures::Scope scope(VFP2);
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
-
- __ Move(result, left);
-
- // (0 % x) must yield 0 (if x is finite, which is the case here).
- __ cmp(left, Operand::Zero());
- __ b(eq, &done);
- // Preload right in a vfp register.
- __ vmov(divisor.low(), right);
- __ b(lt, &vfp_modulo);
-
- __ cmp(left, Operand(right));
- __ b(lt, &done);
-
- // Check for (positive) power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
- scratch,
- &right_negative,
- &both_positive);
- // Perform modulo operation (scratch contains right - 1).
- __ and_(result, scratch, Operand(left));
- __ b(&done);
-
- __ bind(&right_negative);
- // Negate right. The sign of the divisor does not matter.
- __ rsb(right, right, Operand::Zero());
-
- __ bind(&both_positive);
- const int kUnfolds = 3;
- // If the (nonnegative) left hand side is smaller than the right
- // hand side, the left hand side is the result.
- // Else try subtracting the right hand side from it a few times.
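- // Illustrative: 10 % 4 needs two subtractions (10 -> 6 -> 2) before
- // the compare succeeds; dividends not reduced below the divisor
- // within the unrolled compares fall through to the VFP path below.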
- __ mov(scratch, left);
- for (int i = 0; i < kUnfolds; i++) {
- // Check if the left hand side is less than or equal to
- // the right hand side.
- __ cmp(scratch, Operand(right));
- __ mov(result, scratch, LeaveCC, lt);
- __ b(lt, &done);
- // If not, reduce the left hand side by the right hand
- // side and check again.
- if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
- }
-
- __ bind(&vfp_modulo);
- // Load the arguments in VFP registers.
- // The divisor value was preloaded above. Be careful: 'right'
- // is only live on entry.
- __ vmov(dividend.low(), left);
- // From here on don't use right as it may have been reallocated
- // (for example to scratch2).
- right = no_reg;
-
- __ vcvt_f64_s32(dividend, dividend.low());
- __ vcvt_f64_s32(divisor, divisor.low());
-
- // We do not care about the sign of the divisor.
- __ vabs(divisor, divisor);
- // Compute the quotient and round it to a 32-bit integer.
- __ vdiv(quotient, dividend, divisor);
- __ vcvt_s32_f64(quotient.low(), quotient);
- __ vcvt_f64_s32(quotient, quotient.low());
-
- // Compute the remainder in result.
- DwVfpRegister double_scratch = dividend;
- __ vmul(double_scratch, divisor, quotient);
- __ vcvt_s32_f64(double_scratch.low(), double_scratch);
- __ vmov(scratch, double_scratch.low());
-
- if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ sub(result, left, scratch);
- } else {
- Label ok;
- // Check for -0.
- __ sub(scratch2, left, scratch, SetCC);
- __ b(ne, &ok);
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
- __ bind(&ok);
- // Load the result and we are done.
- __ mov(result, scratch2);
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
- Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment) {
- ASSERT(!AreAliased(dividend, scratch, ip));
- ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
-
- uint32_t divisor_abs = abs(divisor);
-
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
-
- switch (divisor_abs) {
- case 0:
- DeoptimizeIf(al, environment);
- return;
-
- case 1:
- if (divisor > 0) {
- __ Move(result, dividend);
- } else {
- __ rsb(result, dividend, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, environment);
- }
- // Compute the remainder.
- __ mov(remainder, Operand::Zero());
- return;
-
- default:
- if (IsPowerOf2(divisor_abs)) {
- // Branch and condition free code for integer division by a power
- // of two.
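- // The adjustment below adds (2^power - 1) to a negative dividend so
- // that the final arithmetic shift rounds toward zero. Illustrative:
- // dividend = -7, power = 2 gives scratch = -7 + 3 = -4 and
- // result = -4 >> 2 = -1, which is trunc(-7 / 4).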
- int32_t power = WhichPowerOf2(divisor_abs);
- if (power > 1) {
- __ mov(scratch, Operand(dividend, ASR, power - 1));
- }
- __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
- __ mov(result, Operand(scratch, ASR, power));
- // Negate if necessary.
- // We don't need to check for overflow because the case '-1' is
- // handled separately.
- if (divisor < 0) {
- ASSERT(divisor != -1);
- __ rsb(result, result, Operand::Zero());
- }
- // Compute the remainder.
- if (divisor > 0) {
- __ sub(remainder, dividend, Operand(result, LSL, power));
- } else {
- __ add(remainder, dividend, Operand(result, LSL, power));
- }
- return;
- } else {
- // Use magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer’s Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
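- //
- // Illustrative: for divisor 3 the magic pair is M = 0x55555556, s = 0.
- // smull leaves the high 32 bits of M * dividend in scratch, and adding
- // the sign bit (dividend LSR 31) yields trunc(dividend / 3), e.g.
- // 7 -> 2 and -7 -> -2.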
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- // Branch and condition free code for integer division using
- // magic numbers.
- const int32_t M = magic_numbers.M;
- const int32_t s = magic_numbers.s + power_of_2_factor;
-
- __ mov(ip, Operand(M));
- __ smull(ip, scratch, dividend, ip);
- if (M < 0) {
- __ add(scratch, scratch, Operand(dividend));
- }
- if (s > 0) {
- __ mov(scratch, Operand(scratch, ASR, s));
- }
- __ add(result, scratch, Operand(dividend, LSR, 31));
- if (divisor < 0) __ rsb(result, result, Operand::Zero());
- // Compute the remainder.
- __ mov(ip, Operand(divisor));
- // This sequence could be replaced with 'mls' when
- // it gets implemented.
- __ mul(scratch, result, ip);
- __ sub(remainder, dividend, scratch);
- }
- }
-}
-
-
-void LCodeGen::DoDivI(LDivI* instr) {
- class DeferredDivI: public LDeferredCode {
- public:
- DeferredDivI(LCodeGen* codegen, LDivI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredBinaryOpStub(instr_->pointer_map(),
- instr_->left(),
- instr_->right(),
- Token::DIV);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LDivI* instr_;
- };
-
- const Register left = ToRegister(instr->left());
- const Register right = ToRegister(instr->right());
- const Register scratch = scratch0();
- const Register result = ToRegister(instr->result());
-
- // Check for x / 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ cmp(left, Operand::Zero());
- __ b(ne, &left_not_zero);
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
- __ bind(&left_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
- DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- Label done, deoptimize;
- // Test for a few common cases first.
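- // The divide-by-2 and divide-by-4 cases rely on ARM conditional
- // execution: cmp sets eq only for the matching divisor, the tst
- // (executed only when eq holds) clears eq unless the dividend is
- // exactly divisible, and the conditional mov and branch fire only
- // when both checks passed.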
- __ cmp(right, Operand(1));
- __ mov(result, left, LeaveCC, eq);
- __ b(eq, &done);
-
- __ cmp(right, Operand(2));
- __ tst(left, Operand(1), eq);
- __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
- __ b(eq, &done);
-
- __ cmp(right, Operand(4));
- __ tst(left, Operand(3), eq);
- __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
- __ b(eq, &done);
-
- // Call the stub. The numbers in r0 and r1 have
- // to be tagged as Smis. If that is not possible, deoptimize.
- DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
-
- __ TrySmiTag(left, &deoptimize, scratch);
- __ TrySmiTag(right, &deoptimize, scratch);
-
- __ b(al, deferred->entry());
- __ bind(deferred->exit());
-
- // If the result in r0 is a Smi, untag it, else deoptimize.
- __ JumpIfNotSmi(result, &deoptimize);
- __ SmiUntag(result);
- __ b(&done);
-
- __ bind(&deoptimize);
- DeoptimizeIf(al, instr->environment());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
- DwVfpRegister addend = ToDoubleRegister(instr->addend());
- DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
- DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
-
- // This is computed in-place.
- ASSERT(addend.is(ToDoubleRegister(instr->result())));
-
- __ vmla(addend, multiplier, multiplicand);
-}
-
-
-void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
- DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
- DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
- DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
-
- // This is computed in-place.
- ASSERT(minuend.is(ToDoubleRegister(instr->result())));
-
- __ vmls(minuend, multiplier, multiplicand);
-}
-
-
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- const Register result = ToRegister(instr->result());
- const Register left = ToRegister(instr->left());
- const Register remainder = ToRegister(instr->temp());
- const Register scratch = scratch0();
-
- if (!CpuFeatures::IsSupported(SUDIV)) {
- // If the CPU doesn't support the sdiv instruction, we only optimize
- // when we have magic numbers for the divisor. The standard integer
- // division routine is usually slower than transitioning to VFP.
- ASSERT(instr->right()->IsConstantOperand());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
- if (divisor < 0) {
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
- EmitSignedIntegerDivisionByConstant(result,
- left,
- divisor,
- remainder,
- scratch,
- instr->environment());
- // We performed a truncating division. Correct the result if necessary.
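- // Illustrative: trunc(-7 / 2) is -3 while floor is -4. The teq,
- // executed only for a nonzero remainder, EORs remainder and divisor,
- // so mi holds exactly when their signs differ and the quotient must
- // be decremented.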
- __ cmp(remainder, Operand::Zero());
- __ teq(remainder, Operand(divisor), ne);
- __ sub(result, result, Operand(1), LeaveCC, mi);
- } else {
- CpuFeatures::Scope scope(SUDIV);
- const Register right = ToRegister(instr->right());
-
- // Check for x / 0.
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
-
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left, Operand(kMinInt));
- __ b(ne, &left_not_min_int);
- __ cmp(right, Operand(-1));
- DeoptimizeIf(eq, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(right, Operand::Zero());
- __ cmp(left, Operand::Zero(), mi);
- // "right" can't be null because the code would have already been
- // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
- // In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr->environment());
- }
-
- Label done;
- __ sdiv(result, left, right);
- // If both operands have the same sign then we are done.
- __ eor(remainder, left, Operand(right), SetCC);
- __ b(pl, &done);
-
- // Check if the result needs to be corrected.
- __ mls(remainder, result, right, left);
- __ cmp(remainder, Operand::Zero());
- __ sub(result, result, Operand(1), LeaveCC, ne);
-
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
- LOperand* left_argument,
- LOperand* right_argument,
- Token::Value op) {
- CpuFeatures::Scope vfp_scope(VFP2);
- Register left = ToRegister(left_argument);
- Register right = ToRegister(right_argument);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
- // Move left to r1 and right to r0 for the stub call.
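- // The cases below avoid clobbering a value before it is read: if left
- // is already r1 only r0 needs loading, a full swap handles the crossed
- // case, and otherwise r1 is written after r0 has been saved from it.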
- if (left.is(r1)) {
- __ Move(r0, right);
- } else if (left.is(r0) && right.is(r1)) {
- __ Swap(r0, r1, r2);
- } else if (left.is(r0)) {
- ASSERT(!right.is(r1));
- __ mov(r1, r0);
- __ mov(r0, right);
- } else {
- ASSERT(!left.is(r0) && !right.is(r0));
- __ mov(r0, right);
- __ mov(r1, left);
- }
- BinaryOpStub stub(op, OVERWRITE_LEFT);
- __ CallStub(&stub);
- RecordSafepointWithRegistersAndDoubles(pointer_map,
- 0,
- Safepoint::kNoLazyDeopt);
- // Overwrite the stored value of r0 with the result of the stub.
- __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
- // Note that result may alias left.
- Register left = ToRegister(instr->left());
- LOperand* right_op = instr->right();
-
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- bool bailout_on_minus_zero =
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
-
- if (right_op->IsConstantOperand() && !can_overflow) {
- // Use optimized code for specific constants.
- int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
-
- if (bailout_on_minus_zero && (constant < 0)) {
- // The case of a zero constant is handled separately below.
- // If the constant is negative and left is zero, the result should be -0.
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
-
- switch (constant) {
- case -1:
- __ rsb(result, left, Operand::Zero());
- break;
- case 0:
- if (bailout_on_minus_zero) {
- // If left is strictly negative and the constant is zero, the
- // result is -0. Deoptimize if required, otherwise return 0.
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
- }
- __ mov(result, Operand::Zero());
- break;
- case 1:
- __ Move(result, left);
- break;
- default:
- // Multiplying by powers of two and powers of two plus or minus
- // one can be done faster with shifted operands.
- // For other constants we emit standard code.
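- // Illustrative: x * 5 becomes add(result, left, left LSL 2),
- // x * 7 becomes rsb(result, left, left LSL 3), and a negative
- // constant such as -8 is mov(result, left LSL 3) followed by the
- // rsb below.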
- int32_t mask = constant >> 31;
- uint32_t constant_abs = (constant + mask) ^ mask;
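- // ((constant + mask) ^ mask with mask = constant >> 31 is a
- // branchless abs: for constant < 0 it computes ~(constant - 1),
- // which equals -constant.)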
-
- if (IsPowerOf2(constant_abs) ||
- IsPowerOf2(constant_abs - 1) ||
- IsPowerOf2(constant_abs + 1)) {
- if (IsPowerOf2(constant_abs)) {
- int32_t shift = WhichPowerOf2(constant_abs);
- __ mov(result, Operand(left, LSL, shift));
- } else if (IsPowerOf2(constant_abs - 1)) {
- int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ add(result, left, Operand(left, LSL, shift));
- } else if (IsPowerOf2(constant_abs + 1)) {
- int32_t shift = WhichPowerOf2(constant_abs + 1);
- __ rsb(result, left, Operand(left, LSL, shift));
- }
-
- // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ rsb(result, result, Operand::Zero());
-
- } else {
- // Generate standard code.
- __ mov(ip, Operand(constant));
- __ mul(result, left, ip);
- }
- }
-
- } else {
- Register right = EmitLoadRegister(right_op, scratch);
- if (bailout_on_minus_zero) {
- __ orr(ToRegister(instr->temp()), left, right);
- }
-
- if (can_overflow) {
- // scratch:result = left * right.
- __ smull(result, scratch, left, right);
- __ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr->environment());
- } else {
- __ mul(result, left, right);
- }
-
- if (bailout_on_minus_zero) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ cmp(result, Operand::Zero());
- __ b(ne, &done);
- __ cmp(ToRegister(instr->temp()), Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
- __ bind(&done);
- }
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left_op = instr->left();
- LOperand* right_op = instr->right();
- ASSERT(left_op->IsRegister());
- Register left = ToRegister(left_op);
- Register result = ToRegister(instr->result());
- Operand right(no_reg);
-
- if (right_op->IsStackSlot() || right_op->IsArgument()) {
- right = Operand(EmitLoadRegister(right_op, ip));
- } else {
- ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
- right = ToOperand(right_op);
- }
-
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(result, left, right);
- break;
- case Token::BIT_OR:
- __ orr(result, left, right);
- break;
- case Token::BIT_XOR:
- __ eor(result, left, right);
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
- // result may alias either of them.
- LOperand* right_op = instr->right();
- Register left = ToRegister(instr->left());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- if (right_op->IsRegister()) {
- // Mask the right_op operand.
- __ and_(scratch, ToRegister(right_op), Operand(0x1F));
- switch (instr->op()) {
- case Token::ROR:
- __ mov(result, Operand(left, ROR, scratch));
- break;
- case Token::SAR:
- __ mov(result, Operand(left, ASR, scratch));
- break;
- case Token::SHR:
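- // Untagged values are signed 32-bit integers, so an unsigned shift
- // can only produce an unrepresentable (top-bit-set) result when the
- // shift amount is zero; SetCC exposes that case as mi.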
- if (instr->can_deopt()) {
- __ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr->environment());
- } else {
- __ mov(result, Operand(left, LSR, scratch));
- }
- break;
- case Token::SHL:
- __ mov(result, Operand(left, LSL, scratch));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- // Mask the right_op operand.
- int value = ToInteger32(LConstantOperand::cast(right_op));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ mov(result, Operand(left, ROR, shift_count));
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ mov(result, Operand(left, ASR, shift_count));
- } else {
- __ Move(result, left);
- }
- break;
- case Token::SHR:
- if (shift_count != 0) {
- __ mov(result, Operand(left, LSR, shift_count));
- } else {
- if (instr->can_deopt()) {
- __ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment());
- }
- __ Move(result, left);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- __ mov(result, Operand(left, LSL, shift_count));
- } else {
- __ Move(result, left);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- SBit set_cond = can_overflow ? SetCC : LeaveCC;
-
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, ip);
- __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
- } else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
- }
-
- if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
- }
-}
-
-
-void LCodeGen::DoRSubI(LRSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- SBit set_cond = can_overflow ? SetCC : LeaveCC;
-
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, ip);
- __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
- } else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
- }
-
- if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- CpuFeatures::Scope scope(VFP2);
- double v = instr->value();
- __ Vmov(result, v, scratch0());
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
- if (value->IsSmi()) {
- __ mov(ToRegister(instr->result()), Operand(value));
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following bit field extraction takes care of that anyway.
- __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temp());
- Label done;
-
- // If the object is a smi return the object.
- __ tst(input, Operand(kSmiTagMask));
- __ Move(result, input, eq);
- __ b(eq, &done);
-
- // If the object is not a value type, return the object.
- __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
- __ Move(result, input, ne);
- __ b(ne, &done);
- __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- Label runtime, done;
- ASSERT(object.is(result));
- ASSERT(object.is(r0));
- ASSERT(!scratch.is(scratch0()));
- ASSERT(!scratch.is(object));
-
- __ tst(object, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
- __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr->environment());
-
- if (index->value() == 0) {
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand(stamp));
- __ ldr(scratch, MemOperand(scratch));
- __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch, scratch0());
- __ b(ne, &runtime);
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(r1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
-}
-
-
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ mvn(result, Operand(input));
-}
-
-
-void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->value(), ip);
- __ push(input_reg);
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- SBit set_cond = can_overflow ? SetCC : LeaveCC;
-
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, ip);
- __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
- } else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
- }
-
- if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
- if (instr->hydrogen()->representation().IsInteger32()) {
- Register left_reg = ToRegister(left);
- Operand right_op = (right->IsRegister() || right->IsConstantOperand())
- ? ToOperand(right)
- : Operand(EmitLoadRegister(right, ip));
- Register result_reg = ToRegister(instr->result());
- __ cmp(left_reg, right_op);
- if (!result_reg.is(left_reg)) {
- __ mov(result_reg, left_reg, LeaveCC, condition);
- }
- __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
- } else {
- ASSERT(instr->hydrogen()->representation().IsDouble());
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister left_reg = ToDoubleRegister(left);
- DwVfpRegister right_reg = ToDoubleRegister(right);
- DwVfpRegister result_reg = ToDoubleRegister(instr->result());
- Label check_nan_left, check_zero, return_left, return_right, done;
- __ VFPCompareAndSetFlags(left_reg, right_reg);
- __ b(vs, &check_nan_left);
- __ b(eq, &check_zero);
- __ b(condition, &return_left);
- __ b(al, &return_right);
-
- __ bind(&check_zero);
- __ VFPCompareAndSetFlags(left_reg, 0.0);
- __ b(ne, &return_left); // left == right != 0.
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
- // We could use a single 'vorr' instruction here if we had NEON support.
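- // Under IEEE arithmetic (-L) - R is +0 unless both L and R are +0,
- // so -((-L) - R) yields -0 exactly when either input is -0, e.g.
- // L = +0, R = -0 gives -((-0) - (-0)) = -(+0) = -0.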
- __ vneg(left_reg, left_reg);
- __ vsub(result_reg, left_reg, right_reg);
- __ vneg(result_reg, result_reg);
- } else {
- // Since we operate on +0 and/or -0, vadd and vand have the same effect;
- // we choose vadd because vand is a NEON-only instruction.
- __ vadd(result_reg, left_reg, right_reg);
- }
- __ b(al, &done);
-
- __ bind(&check_nan_left);
- __ VFPCompareAndSetFlags(left_reg, left_reg);
- __ b(vs, &return_left); // left == NaN.
- __ bind(&return_right);
- if (!right_reg.is(result_reg)) {
- __ vmov(result_reg, right_reg);
- }
- __ b(al, &done);
-
- __ bind(&return_left);
- if (!left_reg.is(result_reg)) {
- __ vmov(result_reg, left_reg);
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister left = ToDoubleRegister(instr->left());
- DwVfpRegister right = ToDoubleRegister(instr->right());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- switch (instr->op()) {
- case Token::ADD:
- __ vadd(result, left, right);
- break;
- case Token::SUB:
- __ vsub(result, left, right);
- break;
- case Token::MUL:
- __ vmul(result, left, right);
- break;
- case Token::DIV:
- __ vdiv(result, left, right);
- break;
- case Token::MOD: {
- // Save r0-r3 on the stack.
- __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
-
- __ PrepareCallCFunction(0, 2, scratch0());
- __ SetCallCDoubleArguments(left, right);
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
- 0, 2);
- // Move the result in the double result register.
- __ GetCFunctionDoubleResult(result);
-
- // Restore r0-r3.
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->left()).is(r1));
- ASSERT(ToRegister(instr->right()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- // Block literal pool emission to ensure nop indicating no inlined smi code
- // is in the correct position.
- Assembler::BlockConstPoolScope block_const_pool(masm());
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ nop(); // Signals no inlined code.
-}
-
-
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
-
-
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
-
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ b(cc, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ b(cc, chunk_->GetAssemblyLabel(left_block));
- __ b(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
- Register reg = ToRegister(instr->value());
- __ cmp(reg, Operand::Zero());
- EmitBranch(true_block, false_block, ne);
- } else if (r.IsDouble()) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister reg = ToDoubleRegister(instr->value());
- Register scratch = scratch0();
-
- // Test the double value. Zero and NaN are false.
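- // VFPCompareAndLoadFlags copies the FPSCR flags into scratch, where
- // Z is set for +/-0 and V for NaN (unordered); the tst below leaves
- // eq set only for a nonzero, non-NaN value.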
- __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
- __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
- EmitBranch(true_block, false_block, eq);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, eq);
- } else if (type.IsSmi()) {
- __ cmp(reg, Operand::Zero());
- EmitBranch(true_block, false_block, ne);
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
-
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
- // undefined -> false.
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ b(eq, false_label);
- }
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
- // Boolean -> its value.
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ b(eq, true_label);
- __ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ b(eq, false_label);
- }
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
- // 'null' -> false.
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ b(eq, false_label);
- }
-
- if (expected.Contains(ToBooleanStub::SMI)) {
- // Smis: 0 -> false, all other -> true.
- __ cmp(reg, Operand::Zero());
- __ b(eq, false_label);
- __ JumpIfSmi(reg, true_label);
- } else if (expected.NeedsMap()) {
- // If we need a map later and have a Smi -> deopt.
- __ tst(reg, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
- }
-
- const Register map = scratch0();
- if (expected.NeedsMap()) {
- __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
-
- if (expected.CanBeUndetectable()) {
- // Undetectable -> false.
- __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- __ b(ne, false_label);
- }
- }
-
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
- // spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, true_label);
- }
-
- if (expected.Contains(ToBooleanStub::STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
- __ b(ge, &not_string);
- __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
- __ cmp(ip, Operand::Zero());
- __ b(ne, true_label);
- __ b(false_label);
- __ bind(&not_string);
- }
-
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
- CpuFeatures::Scope scope(VFP2);
- // heap number -> false iff +0, -0, or NaN.
- DwVfpRegister dbl_scratch = double_scratch0();
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &not_heap_number);
- __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
- __ b(vs, false_label); // NaN -> false.
- __ b(eq, false_label); // +0, -0 -> false.
- __ b(true_label);
- __ bind(&not_heap_number);
- }
-
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(al, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = kNoCondition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = eq;
- break;
- case Token::LT:
- cond = is_unsigned ? lo : lt;
- break;
- case Token::GT:
- cond = is_unsigned ? hi : gt;
- break;
- case Token::LTE:
- cond = is_unsigned ? ls : le;
- break;
- case Token::GTE:
- cond = is_unsigned ? hs : ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Condition cond = TokenToCondition(instr->op(), false);
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- CpuFeatures::Scope scope(VFP2);
- // Compare left and right operands as doubles and load the
- // resulting flags into the normal status register.
- __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
- // If a NaN is involved, i.e. the result is unordered (V set),
- // jump to false block label.
- __ b(vs, chunk_->GetAssemblyLabel(false_block));
- } else {
- if (right->IsConstantOperand()) {
- __ cmp(ToRegister(left),
- Operand(ToInteger32(LConstantOperand::cast(right))));
- } else if (left->IsConstantOperand()) {
- __ cmp(ToRegister(right),
- Operand(ToInteger32(LConstantOperand::cast(left))));
- // We transposed the operands. Reverse the condition.
- cond = ReverseCondition(cond);
- } else {
- __ cmp(ToRegister(left), ToRegister(right));
- }
- }
- EmitBranch(true_block, false_block, cond);
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmp(left, Operand(right));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ cmp(left, Operand(instr->hydrogen()->right()));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register scratch = scratch0();
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(ip, nil_value);
- __ cmp(reg, ip);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, eq);
- } else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ b(eq, true_label);
- __ LoadRoot(ip, other_nil_value);
- __ cmp(reg, ip);
- __ b(eq, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne);
- }
-}
-
-
-Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object) {
- Register temp2 = scratch0();
- __ JumpIfSmi(input, is_not_object);
-
- __ LoadRoot(temp2, Heap::kNullValueRootIndex);
- __ cmp(input, temp2);
- __ b(eq, is_object);
-
- // Load map.
- __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
- __ tst(temp2, Operand(1 << Map::kIsUndetectable));
- __ b(ne, is_not_object);
-
- // Load instance type and check that it is in object type range.
- __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(lt, is_not_object);
- __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- return le;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond =
- EmitIsObject(reg, temp1, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
- __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
-
- return lt;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond =
- EmitIsString(reg, temp1, false_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Register input_reg = EmitLoadRegister(instr->value(), ip);
- __ tst(input_reg, Operand(kSmiTagMask));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
- __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
- __ tst(temp, Operand(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, ne);
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // This instruction also signals that no inlined smi code was generated.
- __ cmp(r0, Operand::Zero());
-
- Condition condition = ComputeCompareCondition(op);
-
- EmitBranch(true_block, false_block, condition);
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- ASSERT(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return eq;
- if (to == LAST_TYPE) return hs;
- if (from == FIRST_TYPE) return ls;
- UNREACHABLE();
- return eq;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register scratch = scratch0();
- Register input = ToRegister(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
-
- __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ ldr(scratch,
- FieldMemOperand(input, String::kHashFieldOffset));
- __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-// Branches to a label or falls through with the answer in flags. Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true,
- Label* is_false,
- Handle<String> class_name,
- Register input,
- Register temp,
- Register temp2) {
- ASSERT(!input.is(temp));
- ASSERT(!input.is(temp2));
- ASSERT(!temp.is(temp2));
-
- __ JumpIfSmi(input, is_false);
-
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, is_false);
- __ b(eq, is_true);
- __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
- __ b(eq, is_true);
- } else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
- __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(gt, is_false);
- }
-
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
- // Check if the constructor in the map is a function.
- __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
-
- // Objects with a non-function constructor have class 'Object'.
- __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
- __ b(ne, is_true);
- } else {
- __ b(ne, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(temp, FieldMemOperand(temp,
- SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- __ cmp(temp, Operand(class_name));
- // End with the answer in flags.
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = scratch0();
- Register temp2 = ToRegister(instr->temp());
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
-
- __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(temp, Operand(instr->map()));
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
- ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-
- __ cmp(r0, Operand::Zero());
- __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
- __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
- }
- virtual LInstruction* instr() { return instr_; }
- Label* map_check() { return &map_check_; }
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
-
- ASSERT(object.is(r0));
- ASSERT(result.is(r0));
-
- // A Smi is not instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurrences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = temp;
- __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- {
- // Block constant pool emission to ensure the positions of instructions are
- // as expected by the patcher. See InstanceofStub::Generate().
- Assembler::BlockConstPoolScope block_const_pool(masm());
- __ bind(deferred->map_check()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch with
- // the cached map.
- PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- __ cmp(map, Operand(ip));
- __ b(ne, &cache_miss);
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch
- // with true or false.
- __ mov(result, Operand(factory()->the_hole_value()));
- }
- __ b(&done);
-
- // The inlined call site cache did not match. Check null and string before
- // calling the deferred code.
- __ bind(&cache_miss);
- // Null is not instance of anything.
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(object, Operand(ip));
- __ b(eq, &false_result);
-
- // String values are not instances of anything.
- Condition is_string = masm_->IsObjectStringType(object, temp);
- __ b(is_string, &false_result);
-
- // Go to the deferred code.
- __ b(deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
-
- // Here result holds either the true or the false value. The deferred code
- // also produces a true or false object.
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- Register result = ToRegister(instr->result());
- ASSERT(result.is(r0));
-
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(flags);
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- // Get the temp register reserved by the instruction. This needs to be r4,
- // as its slot in the pushed safepoint register area is used to communicate
- // the offset to the location of the map check.
- Register temp = ToRegister(instr->temp());
- ASSERT(temp.is(r4));
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 5;
- // Make sure that the code size is predictable, since we use specific
- // constant offsets in the code to find embedded values.
- PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
- int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
- Label before_push_delta;
- __ bind(&before_push_delta);
- __ BlockConstPoolFor(kAdditionalDelta);
- __ mov(temp, Operand(delta * kPointerSize));
- // The mov above can generate one or two instructions. The delta was
- // computed for two instructions, so we need to pad here if only one was
- // generated.
- if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
- ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
- __ nop();
- }
- __ StoreToSafepointRegisterSlot(temp, temp);
- CallCodeGeneric(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value into the result register slot and
- // restore all registers.
- __ StoreToSafepointRegisterSlot(result, result);
-}
-
-
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // This instruction also signals that no inlined smi code was generated.
- __ cmp(r0, Operand::Zero());
-
- Condition condition = ComputeCompareCondition(op);
- __ LoadRoot(ToRegister(instr->result()),
- Heap::kTrueValueRootIndex,
- condition);
- __ LoadRoot(ToRegister(instr->result()),
- Heap::kFalseValueRootIndex,
- NegateCondition(condition));
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- ASSERT(NeedsEagerFrame());
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(sp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
- }
- if (NeedsEagerFrame()) {
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- if (!info()->IsStub()) {
- __ add(sp, sp, Operand(sp_delta));
- }
- }
- __ Jump(lr);
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
- __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- __ mov(r2, Operand(instr->name()));
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Register cell = scratch0();
-
- // Load the cell.
- __ mov(cell, Operand(instr->hydrogen()->cell()));
-
- // If the cell we are storing to contains the hole, it could have been
- // deleted from the property dictionary. In that case, we need to update
- // the property details in the property dictionary to mark it as no
- // longer deleted.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // We use a temp to check the payload (CompareRoot might clobber ip).
- Register payload = ToRegister(instr->temp());
- __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
- __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment());
- }
-
- // Store the value.
- __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-}
-
-
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
-
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
- } else {
- __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
- Register scratch = scratch0();
- MemOperand target = ContextOperand(context, instr->slot_index());
-
- Label skip_assignment;
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, target);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr->environment());
- } else {
- __ b(ne, &skip_assignment);
- }
- }
-
- __ str(value, target);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- __ RecordWriteContextSlot(context,
- target.offset(),
- value,
- scratch,
- kLRHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
- } else {
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ ldr(result, FieldMemOperand(result, HeapObject::kMapOffset));
- __ cmp(result, Operand(Handle<Map>(current->map())));
- DeoptimizeIf(ne, env);
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- }
-}
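-
-// A minimal standalone sketch of the field-offset rule above (the helper
-// name and parameters are illustrative, not V8 API): negative descriptor
-// indices address in-object fields, counted back from the end of the
-// object's fixed part, while non-negative indices address slots in the
-// out-of-line properties array.
-static int FieldByteOffset(int index, int instance_size,
-                           int pointer_size, int properties_header_size) {
-  return index < 0
-      ? instance_size + index * pointer_size             // in-object field
-      : properties_header_size + index * pointer_size;   // properties array
-}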
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- Register object_map = scratch0();
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(al, instr->environment());
- return;
- }
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMap(
- object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
- if (last && !need_generic) {
- DeoptimizeIf(ne, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- __ b(ne, &next);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ b(&done);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ mov(r2, Operand(name));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r0));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- // Name is always in r2.
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register scratch = scratch0();
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Check that the function really is a function. Load map into the
- // result register.
- __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
- DeoptimizeIf(ne, instr->environment());
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
- __ b(ne, &non_instance);
-
- // Get the prototype or initial map from the function.
- __ ldr(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- DeoptimizeIf(eq, instr->environment());
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
- __ b(ne, &done);
-
- // Get the prototype from the initial map.
- __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- __ jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- __ bind(&non_instance);
- __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- Register scratch = scratch0();
-
- __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, fail;
- __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
- __ cmp(scratch, ip);
- __ b(eq, &done);
- // |scratch| still contains the map of the elements object.
- __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
- __ ubfx(scratch, scratch, Map::kElementsKindShift,
- Map::kElementsKindBitCount);
- __ cmp(scratch, Operand(GetInitialFastElementsKind()));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
- __ b(le, &done);
- __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ b(lt, &fail);
- __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ b(le, &done);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register to_reg = ToRegister(instr->result());
- Register from_reg = ToRegister(instr->object());
- __ ldr(to_reg, FieldMemOperand(from_reg,
- ExternalArray::kExternalPointerOffset));
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting the index from the length accounts for one of them; add one
- // more for the other.
- __ sub(length, length, index);
- __ add(length, length, Operand(1));
- __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
-}
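-
-// Sketch of the slot arithmetic above (helper name is illustrative): with
-// two words between the frame pointer and the last argument, argument
-// `index` lives `length - index + 1` pointer-size slots above the
-// arguments base.
-static int ArgumentSlotDistance(int length, int index) {
-  return length - index + 1;  // in pointer-size units
-}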
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- DwVfpRegister result = ToDoubleRegister(instr->result());
- Operand operand = key_is_constant
- ? Operand(constant_key << element_size_shift)
- : Operand(key, LSL, shift_size);
- __ add(scratch0(), external_pointer, operand);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
- __ vcvt_f64_f32(result, kScratchDoubleReg.low());
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vldr(result, scratch0(), additional_offset);
- }
- } else {
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Register value = external_pointer;
- __ ldr(value, MemOperand(scratch0(), additional_offset));
- __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
-
- __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
- __ and_(scratch0(), scratch0(),
- Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ teq(scratch0(), Operand(0x00));
- __ b(eq, &exponent_rebiased);
-
- __ teq(scratch0(), Operand(0xff));
- __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
- __ b(eq, &exponent_rebiased);
-
- // Rebias exponent.
- __ add(scratch0(),
- scratch0(),
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ and_(sfpd_hi, value, Operand(kBinary32SignMask));
- __ orr(sfpd_hi, sfpd_hi,
- Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ orr(sfpd_hi, sfpd_hi,
- Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
- __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
-
- } else {
- __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
- __ ldr(sfpd_hi, MemOperand(scratch0(),
- additional_offset + kPointerSize));
- }
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ ldrsb(result, mem_operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ ldrb(result, mem_operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ ldrsh(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ ldrh(result, mem_operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ ldr(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ ldr(result, mem_operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr->environment());
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
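-
-// The soft-float path above widens an IEEE binary32 bit pattern into a
-// binary64 pattern by hand. A standalone C++ sketch of the same bit
-// manipulation (names are ours; like the assembly, denormal inputs are
-// not rebiased):
-#include <cstdint>
-static uint64_t Binary32BitsToBinary64Bits(uint32_t value) {
-  uint64_t mantissa = value & 0x007FFFFF;    // 23 mantissa bits
-  uint64_t exponent = (value >> 23) & 0xFF;  // 8 exponent bits
-  uint64_t sign = value & 0x80000000u;
-  if (exponent == 0xFF) {
-    exponent = 0x7FF;        // Infinity/NaN keep an all-ones exponent.
-  } else if (exponent != 0) {
-    exponent += 1023 - 127;  // Rebias from binary32 to binary64.
-  }
-  return (sign << 32) | (exponent << 52) | (mantissa << (52 - 23));
-}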
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- bool key_is_constant = instr->key()->IsConstantOperand();
- Register key = no_reg;
- DwVfpRegister result = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
-
- int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- ((constant_key + instr->additional_index()) << element_size_shift);
- if (!key_is_constant) {
- __ add(elements, elements, Operand(key, LSL, shift_size));
- }
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ add(elements, elements, Operand(base_offset));
- __ vldr(result, elements, 0);
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
- }
- } else {
- __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
- __ ldr(sfpd_lo, MemOperand(elements, base_offset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- ASSERT(kPointerSize == sizeof(kHoleNanLower32));
- __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
- }
- }
-}
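-
-// Sketch of the hole check above (helper name is ours): the hole in a
-// fast double array is a NaN that is recognizable from its upper 32 bits
-// alone, so a single word load and compare suffices.
-#include <cstdint>
-#include <cstring>
-static bool IsHoleNanBits(double value, uint32_t hole_nan_upper32) {
-  uint64_t bits;
-  std::memcpy(&bits, &value, sizeof bits);
-  return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
-}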
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- if (instr->key()->IsConstantOperand()) {
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- Register key = EmitLoadRegister(instr->key(), scratch0());
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bounds check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ add(scratch, elements,
- Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- } else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ ldr(result, FieldMemOperand(store_base, offset));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ tst(result, Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment());
- } else {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-MemOperand LCodeGen::PrepareKeyedOperand(Register key,
- Register base,
- bool key_is_constant,
- int constant_key,
- int element_size,
- int shift_size,
- int additional_index,
- int additional_offset) {
- if (additional_index != 0 && !key_is_constant) {
- additional_index *= 1 << (element_size - shift_size);
- __ add(scratch0(), key, Operand(additional_index));
- }
-
- if (key_is_constant) {
- return MemOperand(base,
- (constant_key << element_size) + additional_offset);
- }
-
- if (additional_index == 0) {
- if (shift_size >= 0) {
- return MemOperand(base, key, LSL, shift_size);
- } else {
- ASSERT_EQ(-1, shift_size);
- return MemOperand(base, key, LSR, 1);
- }
- }
-
- if (shift_size >= 0) {
- return MemOperand(base, scratch0(), LSL, shift_size);
- } else {
- ASSERT_EQ(-1, shift_size);
- return MemOperand(base, scratch0(), LSR, 1);
- }
-}
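-
-// A C++ model of the address computation above (a sketch: the helper and
-// its parameters mirror PrepareKeyedOperand but are not V8 API). A
-// shift_size of -1 encodes a right shift by one, i.e. a tagged smi key;
-// keys are non-negative, so the shifts below are well defined.
-#include <cstdint>
-static intptr_t KeyedEffectiveAddress(intptr_t base, intptr_t key,
-                                      bool key_is_constant, int constant_key,
-                                      int element_size, int shift_size,
-                                      int additional_index,
-                                      int additional_offset) {
-  if (key_is_constant) {
-    return base + (constant_key << element_size) + additional_offset;
-  }
-  // Pre-scale additional_index so that the final shift lands it at
-  // additional_index << element_size, just as the add into scratch0()
-  // above does.
-  key += intptr_t{additional_index} << (element_size - shift_size);
-  return base + (shift_size >= 0 ? key << shift_size : key >> 1);
-}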
-
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r1));
- ASSERT(ToRegister(instr->key()).is(r0));
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register scratch = scratch0();
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ sub(result, sp, Operand(2 * kPointerSize));
- } else {
- // Check if the calling frame is an arguments adaptor frame.
- Label done, adapted;
- __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
- __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Result is the frame pointer for the frame if not adapted, and the frame
- // pointer of the real frame below the adaptor frame if adapted.
- __ mov(result, fp, LeaveCC, ne);
- __ mov(result, scratch, LeaveCC, eq);
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
-
- Label done;
-
- // If there is no arguments adaptor frame, the number of arguments is fixed.
- __ cmp(fp, elem);
- __ mov(result, Operand(scope()->num_parameters()));
- __ b(eq, &done);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(result,
- MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register scratch = scratch0();
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
-
- // Do not transform the receiver to object for strict mode
- // functions.
- __ ldr(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(scratch,
- Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
- __ b(ne, &receiver_ok);
-
- // Do not transform the receiver to object for builtins.
- __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &receiver_ok);
-
- // Normal function. Replace undefined or null with global receiver.
- __ LoadRoot(scratch, Heap::kNullValueRootIndex);
- __ cmp(receiver, scratch);
- __ b(eq, &global_object);
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ cmp(receiver, scratch);
- __ b(eq, &global_object);
-
- // Deoptimize if the receiver is not a JS object.
- __ tst(receiver, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
- __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
- DeoptimizeIf(lt, instr->environment());
- __ jmp(&receiver_ok);
-
- __ bind(&global_object);
- __ ldr(receiver, GlobalObjectOperand());
- __ ldr(receiver,
- FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
- __ bind(&receiver_ok);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- Register scratch = scratch0();
- ASSERT(receiver.is(r0)); // Used for parameter count.
- ASSERT(function.is(r1)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(r0));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr->environment());
-
- // Push the receiver and use the register to keep the original
- // number of arguments.
- __ push(receiver);
- __ mov(receiver, length);
- // The arguments start at a one-pointer-size offset from elements.
- __ add(elements, elements, Operand(1 * kPointerSize));
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ cmp(length, Operand::Zero());
- __ b(eq, &invoke);
- __ bind(&loop);
- __ ldr(scratch, MemOperand(elements, length, LSL, 2));
- __ push(scratch);
- __ sub(length, length, Operand(1), SetCC);
- __ b(ne, &loop);
-
- __ bind(&invoke);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- // The number of arguments is stored in receiver, which is r0, as expected
- // by InvokeFunction.
- ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
- Abort("DoPushArgument not implemented for double type.");
- } else {
- Register argument_reg = EmitLoadRegister(argument, ip);
- __ push(argument_reg);
- }
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- // If there is a non-return use, the context must be moved to a register.
- Register result = ToRegister(instr->result());
- for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- __ mov(result, cp);
- return;
- }
- }
-}
-
-
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ ldr(result,
- MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- __ push(cp); // The context is the first argument.
- __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
- __ push(scratch0());
- __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
- __ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(cp, instr->qml_global()
- ? Context::QML_GLOBAL_OBJECT_INDEX
- : Context::GLOBAL_OBJECT_INDEX));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global_object());
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- R1State r1_state) {
- bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
- function->shared()->formal_parameter_count() == arity;
-
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- if (can_invoke_directly) {
- if (r1_state == R1_UNINITIALIZED) {
- __ LoadHeapObject(r1, function);
- }
-
- // Change context.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Set r0 to the arguments count if adaptation is not needed. Assumes
- // that r0 is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ mov(r0, Operand(arity));
- }
-
- // Invoke function.
- __ SetCallKind(r5, call_kind);
- __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ Call(ip);
-
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- } else {
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
- __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
- }
-
- // Restore context.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- CallKnownFunction(instr->function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- R1_UNINITIALIZED);
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // Deoptimize if not a heap number.
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
-
- Label done;
- Register exponent = scratch0();
- scratch = no_reg;
- __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it.
- __ tst(exponent, Operand(HeapNumber::kSignMask));
- // Move the input to the result if necessary.
- __ Move(result, input);
- __ b(eq, &done);
-
- // Input is negative. Reverse its sign.
- // Preserve the value of all registers.
- {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- // Registers were saved at the safepoint, so we can use
- // many scratch registers.
- Register tmp1 = input.is(r1) ? r0 : r1;
- Register tmp2 = input.is(r2) ? r0 : r2;
- Register tmp3 = input.is(r3) ? r0 : r3;
- Register tmp4 = input.is(r4) ? r0 : r4;
-
- // exponent: floating point exponent value.
-
- Label allocated, slow;
- __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
- __ b(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- // Set the pointer to the new heap number in tmp1.
- if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input, input);
- __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
-
- __ bind(&allocated);
- // exponent: floating point exponent value.
- // tmp1: allocated heap number.
- __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
- __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
- __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
- __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
-
- __ StoreToSafepointRegisterSlot(tmp1, result);
- }
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- __ cmp(input, Operand::Zero());
- __ Move(result, input, pl);
- // We can make the rsb conditional because the previous cmp instruction
- // clears the V (overflow) flag, and the conditional rsb is not executed
- // (so cannot set V) when the input is positive.
- __ rsb(result, input, Operand::Zero(), SetCC, mi);
- // Deoptimize on overflow.
- DeoptimizeIf(vs, instr->environment());
-}
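-
-// The semantics of the predicated sequence above, as a plain C++ sketch
-// (the deoptimization is modeled as a boolean failure; names are ours):
-// positive inputs pass through, negative inputs are negated via
-// rsb (0 - input), and the one value whose negation overflows, INT32_MIN,
-// triggers a deopt.
-#include <cstdint>
-static bool IntegerMathAbs(int32_t input, int32_t* result) {
-  if (input == INT32_MIN) return false;  // rsb overflows: deoptimize.
-  *result = input >= 0 ? input : -input;
-  return true;
-}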
-
-
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(VFP2);
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LUnaryMathOperation* instr_;
- };
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsDouble()) {
- DwVfpRegister input = ToDoubleRegister(instr->value());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- __ vabs(result, input);
- } else if (r.IsInteger32()) {
- EmitIntegerMathAbs(instr);
- } else {
- // Representation is tagged.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input, deferred->entry());
- // If smi, handle it directly.
- EmitIntegerMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
-
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- __ EmitVFPTruncate(kRoundToMinusInf,
- result,
- input,
- scratch,
- double_scratch0());
- DeoptimizeIf(ne, instr->environment());
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- Label done;
- __ cmp(result, Operand::Zero());
- __ b(ne, &done);
- __ vmov(scratch, input.high());
- __ tst(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister input = ToDoubleRegister(instr->value());
- Register result = ToRegister(instr->result());
- DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
- Register scratch = scratch0();
- Label done, check_sign_on_zero;
-
- // Extract exponent bits.
- __ vmov(result, input.high());
- __ ubfx(scratch,
- result,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // If the number is in ]-0.5, +0.5[, the result is +/- 0.
- __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
- __ mov(result, Operand::Zero(), LeaveCC, le);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ b(le, &check_sign_on_zero);
- } else {
- __ b(le, &done);
- }
-
- // The following conversion will not work with numbers
- // outside of ]-2^32, 2^32[.
- __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
- DeoptimizeIf(ge, instr->environment());
-
- __ Vmov(double_scratch0(), 0.5, scratch);
- __ vadd(double_scratch0(), input, double_scratch0());
-
- // Save the original sign for later comparison.
- __ and_(scratch, result, Operand(HeapNumber::kSignMask));
-
- // Check the sign of the result: if the sign changed, the input
- // value was in ]-0.5, 0[ and the result should be -0.
- __ vmov(result, double_scratch0().high());
- __ eor(result, result, Operand(scratch), SetCC);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(mi, instr->environment());
- } else {
- __ mov(result, Operand::Zero(), LeaveCC, mi);
- __ b(mi, &done);
- }
-
- __ EmitVFPTruncate(kRoundToMinusInf,
- result,
- double_scratch0(),
- scratch,
- double_scratch1);
- DeoptimizeIf(ne, instr->environment());
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Test for -0.
- __ cmp(result, Operand::Zero());
- __ b(ne, &done);
- __ bind(&check_sign_on_zero);
- __ vmov(scratch, input.high());
- __ tst(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
- }
- __ bind(&done);
-}
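-
-// Sketch of the exponent shortcut above (helper name is ours): a double
-// whose biased exponent is at most kExponentBias - 2 has magnitude below
-// 0.5, so Math.round can produce +/-0 without any conversion at all.
-#include <cstdint>
-#include <cstring>
-static bool RoundsToZero(double x) {
-  uint64_t bits;
-  std::memcpy(&bits, &x, sizeof bits);
-  int biased_exponent = static_cast<int>((bits >> 52) & 0x7FF);
-  return biased_exponent <= 1023 - 2;  // |x| < 0.5
-}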
-
-
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister input = ToDoubleRegister(instr->value());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- __ vsqrt(result, input);
-}
-
-
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister input = ToDoubleRegister(instr->value());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- DwVfpRegister temp = ToDoubleRegister(instr->temp());
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label done;
- __ vmov(temp, -V8_INFINITY, scratch0());
- __ VFPCompareAndSetFlags(input, temp);
- __ vneg(result, temp, eq);
- __ b(&done, eq);
-
- // Add +0 to convert -0 to +0.
- __ vadd(result, input, kDoubleRegZero);
- __ vsqrt(result, result);
- __ bind(&done);
-}
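-
-// Sketch of the two special cases handled above (helper name is ours):
-// ECMA-262 requires Math.pow(-Infinity, 0.5) == +Infinity even though
-// sqrt(-Infinity) is NaN, and adding +0 first folds -0 into +0 before
-// the square root is taken.
-#include <cmath>
-#include <limits>
-static double PowHalf(double x) {
-  if (x == -std::numeric_limits<double>::infinity()) {
-    return std::numeric_limits<double>::infinity();
-  }
-  return std::sqrt(x + 0.0);
-}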
-
-
-void LCodeGen::DoPower(LPower* instr) {
- CpuFeatures::Scope scope(VFP2);
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
- ASSERT(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(d2));
- ASSERT(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(r2));
- ASSERT(ToDoubleRegister(instr->left()).is(d1));
- ASSERT(ToDoubleRegister(instr->result()).is(d3));
-
- if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(r2, &no_deopt);
- __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r7, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
- __ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-
-void LCodeGen::DoRandom(LRandom* instr) {
- CpuFeatures::Scope scope(VFP2);
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(d7));
- ASSERT(ToRegister(instr->global_object()).is(r0));
-
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == kSeedSize);
-
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
- // r2: FixedArray of the native context's random seeds
-
- // Load state[0].
- __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
- __ cmp(r1, Operand::Zero());
- __ b(eq, deferred->entry());
- // Load state[1].
- __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
- // r1: state[0].
- // r0: state[1].
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ and_(r3, r1, Operand(0xFFFF));
- __ mov(r4, Operand(18273));
- __ mul(r3, r3, r4);
- __ add(r1, r3, Operand(r1, LSR, 16));
- // Save state[0].
- __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ and_(r3, r0, Operand(0xFFFF));
- __ mov(r4, Operand(36969));
- __ mul(r3, r3, r4);
- __ add(r0, r3, Operand(r0, LSR, 16));
- // Save state[1].
- __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ and_(r0, r0, Operand(0x3FFFF));
- __ add(r0, r0, Operand(r1, LSL, 14));
-
- __ bind(deferred->exit());
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand::Zero());
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
-}
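-
-// The generator emitted above is a pair of 16-bit multiply-with-carry
-// steps plus a bit trick that converts 32 random bits into a double in
-// [0, 1). A standalone sketch (struct and names are ours, not V8's):
-#include <cstdint>
-#include <cstring>
-struct RandomSeeds { uint32_t state0, state1; };
-static double NextRandom(RandomSeeds* seeds) {
-  seeds->state0 = 18273 * (seeds->state0 & 0xFFFF) + (seeds->state0 >> 16);
-  seeds->state1 = 36969 * (seeds->state1 & 0xFFFF) + (seeds->state1 >> 16);
-  uint32_t bits = (seeds->state0 << 14) + (seeds->state1 & 0x3FFFF);
-  // Splice the random bits into the mantissa of 2^20 (upper half
-  // 0x41300000), then subtract 2^20: the result is bits * 2^-32.
-  uint64_t pattern = (uint64_t{0x41300000} << 32) | bits;
-  double d;
-  std::memcpy(&d, &pattern, sizeof d);
-  return d - 1048576.0;  // 1048576.0 == 2^20
-}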
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, scratch0());
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in r0.
-}
-
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister input = ToDoubleRegister(instr->value());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
- DwVfpRegister double_scratch2 = double_scratch0();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(
- masm(), input, result, double_scratch1, double_scratch2,
- temp1, temp2, scratch0());
-}
-
-
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(d2));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
- default:
- Abort("Unimplemented type of LUnaryMathOperation.");
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- ASSERT(ToRegister(instr->function()).is(r1));
- ASSERT(instr->HasPointerMap());
-
- if (instr->known_function().is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- } else {
- CallKnownFunction(instr->known_function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- R1_CONTAINS_TARGET);
- }
-}
-
-
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(r2, Operand(instr->name()));
- CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->function()).is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(r2, Operand(instr->name()));
- CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(r0));
- CallKnownFunction(instr->target(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- R1_UNINITIALIZED);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->constructor()).is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
-
- __ mov(r0, Operand(instr->arity()));
- if (FLAG_optimize_constructed_arrays) {
- // No cell in r2 for construct type feedback in optimized code.
- Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
- isolate());
- __ mov(r2, Operand(undefined_value));
- }
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- ASSERT(ToRegister(instr->constructor()).is(r1));
- ASSERT(ToRegister(instr->result()).is(r0));
- ASSERT(FLAG_optimize_constructed_arrays);
-
- __ mov(r0, Operand(instr->arity()));
- __ mov(r2, Operand(instr->hydrogen()->property_cell()));
- Handle<Code> array_construct_code =
- isolate()->builtins()->ArrayConstructCode();
-
- CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- Register scratch = scratch0();
- int offset = instr->offset();
-
- ASSERT(!object.is(value));
-
- if (!instr->transition().is_null()) {
- __ mov(scratch, Operand(instr->transition()));
- __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- scratch,
- temp,
- kLRHasBeenSaved,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
- }
-
- // Do the store.
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
- __ str(value, FieldMemOperand(object, offset));
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object,
- offset,
- value,
- scratch,
- kLRHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- } else {
- __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ str(value, FieldMemOperand(scratch, offset));
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWriteField(scratch,
- offset,
- value,
- object,
- kLRHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- }
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
-
- // Name is always in r2.
- __ mov(r2, Operand(instr->name()));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
-
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
- __ mov(ip, Operand(Smi::FromInt(constant_index)));
- } else {
- __ mov(ip, Operand(constant_index));
- }
- __ cmp(ip, ToRegister(instr->length()));
- } else {
- __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
- }
- DeoptimizeIf(hs, instr->environment());
-}
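-
-// The deoptimization condition above is hs, i.e. an unsigned >=. Treating
-// both operands as unsigned lets a single comparison reject out-of-range
-// and negative indices alike, since a negative int32 reinterprets as a
-// huge unsigned value. Sketch (helper name is ours):
-#include <cstdint>
-static bool IndexInBounds(int32_t index, int32_t length) {
-  return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
-}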
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- CpuFeatures::Scope scope(VFP2);
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatures::Scope scope(VFP3);
- DwVfpRegister value(ToDoubleRegister(instr->value()));
- Operand operand(key_is_constant
- ? Operand(constant_key << element_size_shift)
- : Operand(key, LSL, shift_size));
- __ add(scratch0(), external_pointer, operand);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vcvt_f32_f64(double_scratch0().low(), value);
- __ vstr(double_scratch0().low(), scratch0(), additional_offset);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vstr(value, scratch0(), additional_offset);
- }
- } else {
- Register value(ToRegister(instr->value()));
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(value, mem_operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(value, mem_operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(value, mem_operand);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- Operand operand = key_is_constant
- ? Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(scratch, elements, operand);
- if (!key_is_constant) {
- __ add(scratch, scratch,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- if (instr->NeedsCanonicalization()) {
- // Check for NaN. All NaNs must be canonicalized.
- __ VFPCompareAndSetFlags(value, value);
- Label after_canonicalization;
-
- // Only load the canonical NaN if the compare above set the V flag
- // (unordered result).
- __ b(vc, &after_canonicalization);
- __ Vmov(value,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
-
- __ bind(&after_canonicalization);
- }
-
- __ vstr(value, scratch, instr->additional_index() << element_size_shift);
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
- : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- // Even though the HStoreKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bounds check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ add(scratch, elements,
- Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- } else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ str(value, FieldMemOperand(store_base, offset));
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ add(key, store_base, Operand(offset - kHeapObjectTag));
- __ RecordWrite(elements,
- key,
- value,
- kLRHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // Dispatch by case: external array, fast double array, or fast tagged array.
- if (instr->is_external()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(r2));
- ASSERT(ToRegister(instr->key()).is(r1));
- ASSERT(ToRegister(instr->value()).is(r0));
-
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
- Register scratch = scratch0();
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(from_map));
- __ b(ne, &not_applicable);
-
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ mov(new_map_reg, Operand(to_map));
- __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- // Write barrier.
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- scratch, kLRHasBeenSaved, kDontSaveFPRegs);
- } else if (FLAG_compiled_transitions) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ Move(r0, object_reg);
- __ Move(r1, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(r2));
- Register new_map_reg = ToRegister(instr->new_map_temp());
- ASSERT(new_map_reg.is(r3));
- __ mov(new_map_reg, Operand(to_map));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(r2));
- Register new_map_reg = ToRegister(instr->new_map_temp());
- ASSERT(new_map_reg.is(r3));
- __ mov(new_map_reg, Operand(to_map));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
- } else {
- UNREACHABLE();
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationSiteInfo(object, temp);
- DeoptimizeIf(eq, instr->environment());
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(masm(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ mov(scratch, Operand(Smi::FromInt(const_index)));
- __ push(scratch);
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- __ AssertSmi(r0);
- __ SmiUntag(r0);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- ASSERT(!char_code.is(result));
-
- __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
- __ b(hi, deferred->entry());
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
- __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(result, ip);
- __ b(eq, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ SmiTag(char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- CpuFeatures::Scope scope(VFP2);
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- SwVfpRegister single_scratch = double_scratch0().low();
- if (input->IsStackSlot()) {
- Register scratch = scratch0();
- __ ldr(scratch, ToMemOperand(input));
- __ vmov(single_scratch, scratch);
- } else {
- __ vmov(single_scratch, ToRegister(input));
- }
- __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatures::Scope scope(VFP2);
- LOperand* input = instr->value();
- LOperand* output = instr->result();
-
- SwVfpRegister flt_scratch = double_scratch0().low();
- __ vmov(flt_scratch, ToRegister(input));
- __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- SIGNED_INT32);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagI* instr_;
- };
-
- Register src = ToRegister(instr->value());
- Register dst = ToRegister(instr->result());
-
- DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
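- // SmiTag with SetCC is implemented as src + src, an add that sets the
- // V flag on signed overflow, i.e. exactly when src is not in smi range.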
- __ SmiTag(dst, src, SetCC);
- __ b(vs, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- UNSIGNED_INT32);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagU* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ cmp(reg, Operand(Smi::kMaxValue));
- __ b(hi, deferred->entry());
- __ SmiTag(reg, reg);
- __ bind(deferred->exit());
-}
-
-
-// Converts an unsigned integer with the specified number of leading zeroes
-// in its binary representation to an IEEE 754 double.
-// The integer to convert is passed in register hiword.
-// The resulting double is returned in the register pair hiword:loword.
-// This function does not work correctly for 0.
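-// Worked example (standard IEEE 754 layout): converting 0x40000000 (2^30)
-// with leading_zeroes == 1 gives meaningful_bits == 30 and
-// biased_exponent == 1023 + 30 == 1053 (0x41D). The low word becomes
-// 0x40000000 << 22 == 0, the high word 0x41D00000 | (0x40000000 >> 10)
-// == 0x41D00000, which together encode exactly 2^30.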
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
- masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
- masm->orr(hiword, scratch,
- Operand(hiword, LSR, mantissa_shift_for_hi_word));
- } else {
- masm->mov(loword, Operand::Zero());
- masm->orr(hiword, scratch,
- Operand(hiword, LSL, -mantissa_shift_for_hi_word));
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
- }
-}
-
-
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
- Register src = ToRegister(value);
- Register dst = ToRegister(instr->result());
- DwVfpRegister dbl_scratch = double_scratch0();
- SwVfpRegister flt_scratch = dbl_scratch.low();
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- Label done;
- if (signedness == SIGNED_INT32) {
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
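- // To recover the input: dst holds src << 1 (mod 2^32), and SmiUntag
- // restores bits 30..0 but leaves bit 31 equal to the old bit 30. The
- // overflow guarantees bit 30 was the complement of the real sign bit,
- // so flipping bit 31 with eor reproduces the original value.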
- if (dst.is(src)) {
- __ SmiUntag(src, dst);
- __ eor(src, src, Operand(0x80000000));
- }
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(flt_scratch, src);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
- } else {
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
- sfpd_lo, sfpd_hi,
- scratch0(), s0);
- }
- } else {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(flt_scratch, src);
- __ vcvt_f64_u32(dbl_scratch, flt_scratch);
- } else {
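- // This path is only reached when the value exceeds Smi::kMaxValue
- // (2^30 - 1), so it has exactly zero or one leading zero bits, which
- // is precisely what GenerateUInt2Double requires.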
- Label no_leading_zero, done;
- __ tst(src, Operand(0x80000000));
- __ b(ne, &no_leading_zero);
-
- // The integer has exactly one leading zero.
- GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
- __ b(&done);
-
- __ bind(&no_leading_zero);
- GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
- __ bind(&done);
- }
- }
-
- if (FLAG_inline_new) {
- __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
- __ Move(dst, r5);
- __ b(&done);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ mov(ip, Operand::Zero());
- __ StoreToSafepointRegisterSlot(ip, dst);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- __ Move(dst, r0);
- __ sub(dst, dst, Operand(kHeapObjectTag));
-
- // Done. Put the value in dbl_scratch into the value of the allocated heap
- // number.
- __ bind(&done);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
- } else {
- __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
- __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
- }
- __ add(dst, dst, Operand(kHeapObjectTag));
- __ StoreToSafepointRegisterSlot(dst, dst);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagD* instr_;
- };
-
- DwVfpRegister input_reg = ToDoubleRegister(instr->value());
- Register scratch = scratch0();
- Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- bool convert_hole = false;
- HValue* change_input = instr->hydrogen()->value();
- if (change_input->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(change_input);
- convert_hole = load->UsesMustHandleHole();
- }
-
- Label no_special_nan_handling;
- Label done;
- if (convert_hole) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ VFPCompareAndSetFlags(input_reg, input_reg);
- __ b(vc, &no_special_nan_handling);
- __ vmov(reg, scratch0(), input_reg);
- __ cmp(scratch0(), Operand(kHoleNanUpper32));
- Label canonicalize;
- __ b(ne, &canonicalize);
- __ Move(reg, factory()->the_hole_value());
- __ b(&done);
- __ bind(&canonicalize);
- __ Vmov(input_reg,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
- no_reg);
- } else {
- Label not_hole;
- __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
- __ b(ne, &not_hole);
- __ Move(reg, factory()->the_hole_value());
- __ b(&done);
- __ bind(&not_hole);
- __ and_(scratch, sfpd_hi, Operand(0x7ff00000));
- __ cmp(scratch, Operand(0x7ff00000));
- __ b(ne, &no_special_nan_handling);
- Label special_nan_handling;
- __ tst(sfpd_hi, Operand(0x000FFFFF));
- __ b(ne, &special_nan_handling);
- __ cmp(sfpd_lo, Operand(0));
- __ b(eq, &no_special_nan_handling);
- __ bind(&special_nan_handling);
- double canonical_nan =
- FixedDoubleArray::canonical_not_the_hole_nan_as_double();
- uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
- __ mov(sfpd_lo,
- Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
- __ mov(sfpd_hi,
- Operand(static_cast<uint32_t>(casted_nan >> 32)));
- }
- }
-
- __ bind(&no_special_nan_handling);
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- // We want the untagged address first for performance
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
- DONT_TAG_RESULT);
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vstr(input_reg, reg, HeapNumber::kValueOffset);
- } else {
- __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
- __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
- }
- // Now that we have finished with the object's real address, tag it.
- __ add(reg, reg, Operand(kHeapObjectTag));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ mov(reg, Operand::Zero());
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- __ sub(r0, r0, Operand(kHeapObjectTag));
- __ StoreToSafepointRegisterSlot(r0, reg);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- if (instr->needs_check()) {
- STATIC_ASSERT(kHeapObjectTag == 1);
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr->environment());
- } else {
- __ SmiUntag(result, input);
- }
-}
-
-
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- DwVfpRegister result_reg,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode) {
- Register scratch = scratch0();
- SwVfpRegister flt_scratch = double_scratch0().low();
- ASSERT(!result_reg.is(double_scratch0()));
- CpuFeatures::Scope scope(VFP2);
-
- Label load_smi, heap_number, done;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
- // Heap number map check.
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- if (deoptimize_on_undefined) {
- DeoptimizeIf(ne, env);
- } else {
- Label heap_number;
- __ b(eq, &heap_number);
-
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, env);
-
- // Convert undefined to NaN.
- __ LoadRoot(ip, Heap::kNanValueRootIndex);
- __ sub(ip, ip, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
- __ jmp(&done);
-
- __ bind(&heap_number);
- }
- // Heap number to double register conversion.
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
- if (deoptimize_on_minus_zero) {
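- // -0.0 is the only double whose low word is zero and whose high word
- // is exactly kSignMask (sign bit set, everything else clear).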
- __ vmov(ip, result_reg.low());
- __ cmp(ip, Operand::Zero());
- __ b(ne, &done);
- __ vmov(ip, result_reg.high());
- __ cmp(ip, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, env);
- }
- __ jmp(&done);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
- __ SmiUntag(scratch, input_reg, SetCC);
- DeoptimizeIf(cs, env);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
- __ Vmov(result_reg,
- FixedDoubleArray::hole_nan_as_double(),
- no_reg);
- __ b(&done);
- } else {
- __ SmiUntag(scratch, input_reg);
- ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
- }
-
- // Smi to double register conversion
- __ bind(&load_smi);
- // scratch: untagged value of input_reg
- __ vmov(flt_scratch, scratch);
- __ vcvt_f64_s32(result_reg, flt_scratch);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Register input_reg = ToRegister(instr->value());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DwVfpRegister double_scratch = double_scratch0();
- DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
-
- ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
- ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
-
- Label done;
-
- // The input was optimistically untagged; revert it.
- // The carry flag is set when we reach this deferred code, as we just
- // executed SmiUntag(heap_object, SetCC).
- STATIC_ASSERT(kHeapObjectTag == 1);
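- // SmiUntag shifted the value right by one and moved the tag bit into
- // the carry, so input + input + carry (adc) rebuilds the tagged pointer.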
- __ adc(input_reg, input_reg, Operand(input_reg));
-
- // Heap number map check.
- __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch1, Operand(ip));
-
- if (instr->truncating()) {
- CpuFeatures::Scope scope(VFP2);
- Register scratch3 = ToRegister(instr->temp2());
- ASSERT(!scratch3.is(input_reg) &&
- !scratch3.is(scratch1) &&
- !scratch3.is(scratch2));
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- Label heap_number;
- __ b(eq, &heap_number);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, instr->environment());
- __ mov(input_reg, Operand::Zero());
- __ b(&done);
-
- __ bind(&heap_number);
- __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
-
- __ EmitECMATruncate(input_reg,
- double_scratch2,
- double_scratch,
- scratch1,
- scratch2,
- scratch3);
-
- } else {
- CpuFeatures::Scope scope(VFP3);
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(ne, instr->environment());
-
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
- __ EmitVFPTruncate(kRoundToZero,
- input_reg,
- double_scratch,
- scratch1,
- double_scratch2,
- kCheckForInexactConversion);
- DeoptimizeIf(ne, instr->environment());
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(input_reg, Operand::Zero());
- __ b(ne, &done);
- __ vmov(scratch1, double_scratch.high());
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr->environment());
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
-
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
-
- // Optimistically untag the input.
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(input_reg, SetCC);
- // Branch to deferred code if the input was tagged.
- // The deferred code will take care of restoring the tag.
- __ b(cs, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- DwVfpRegister result_reg = ToDoubleRegister(result);
-
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
- HValue* value = instr->hydrogen()->value();
- if (value->type().IsSmi()) {
- if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
- }
- }
- }
-
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Register result_reg = ToRegister(instr->result());
- Register scratch1 = scratch0();
- Register scratch2 = ToRegister(instr->temp());
- DwVfpRegister double_input = ToDoubleRegister(instr->value());
- DwVfpRegister double_scratch = double_scratch0();
-
- Label done;
-
- if (instr->truncating()) {
- Register scratch3 = ToRegister(instr->temp2());
- __ EmitECMATruncate(result_reg,
- double_input,
- double_scratch,
- scratch1,
- scratch2,
- scratch3);
- } else {
- __ EmitVFPTruncate(kRoundToMinusInf,
- result_reg,
- double_input,
- scratch1,
- double_scratch,
- kCheckForInexactConversion);
-
- // Deoptimize on a VFP invalid-operation exception, and also on an
- // inexact conversion.
- DeoptimizeIf(ne, instr->environment());
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- __ tst(ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- __ tst(ToRegister(input), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ cmp(scratch, Operand(first));
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- DeoptimizeIf(ne, instr->environment());
- } else {
- DeoptimizeIf(lo, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr->environment());
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (IsPowerOf2(mask)) {
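- // With a single-bit mask the tag can only be 0 or the mask itself,
- // so a tst plus the right condition replaces the and+cmp sequence.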
- ASSERT(tag == 0 || IsPowerOf2(tag));
- __ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
- } else {
- __ and_(scratch, scratch, Operand(mask));
- __ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (isolate()->heap()->InNewSpace(*target)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
- __ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
- __ cmp(reg, ip);
- } else {
- __ cmp(reg, Operand(target));
- }
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapCommon(Register map_reg,
- Handle<Map> map,
- CompareMapMode mode,
- LEnvironment* env) {
- Label success;
- __ CompareMap(map_reg, map, &success, mode);
- DeoptimizeIf(ne, env);
- __ bind(&success);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- Register map_reg = scratch0();
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- Register reg = ToRegister(input);
-
- Label success;
- SmallMapList* map_set = instr->hydrogen()->map_set();
- __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(map_reg, map, &success, REQUIRE_EXACT_MAP);
- __ b(eq, &success);
- }
- Handle<Map> map = map_set->last();
- DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment());
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- CpuFeatures::Scope vfp_scope(VFP2);
- DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
- __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- CpuFeatures::Scope scope(VFP2);
- Register unclamped_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampUint8(result_reg, unclamped_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- CpuFeatures::Scope scope(VFP2);
- Register scratch = scratch0();
- Register input_reg = ToRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
- Label is_smi, done, heap_number;
-
- // Both smi and heap number cases are handled.
- __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
-
- // Check for heap number
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(factory()->heap_number_map()));
- __ b(eq, &heap_number);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr->environment());
- __ mov(result_reg, Operand::Zero());
- __ jmp(&done);
-
- // Heap number
- __ bind(&heap_number);
- __ vldr(double_scratch0(), FieldMemOperand(input_reg,
- HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
- __ jmp(&done);
-
- // Smi case.
- __ bind(&is_smi);
- __ ClampUint8(result_reg, result_reg);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
- Register prototype_reg = ToRegister(instr->temp());
- Register map_reg = ToRegister(instr->temp2());
-
- ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
- ZoneList<Handle<Map> >* maps = instr->maps();
-
- ASSERT(prototypes->length() == maps->length());
-
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- __ LoadHeapObject(prototype_reg,
- prototypes->at(prototypes->length() - 1));
- } else {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(prototype_reg, prototypes->at(i));
- __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
- DoCheckMapCommon(map_reg,
- maps->at(i),
- ALLOW_ELEMENT_TRANSITION_MAPS,
- instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Register scratch2 = ToRegister(instr->temp2());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- // Allocate memory for the object. The initial map might change when
- // the constructor's prototype changes, but instance size and property
- // counts remain unchanged (if slack tracking finished).
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(map, constructor);
- __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
- __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ str(scratch, FieldMemOperand(result, property_offset));
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand::Zero());
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ mov(r0, Operand(Smi::FromInt(instance_size)));
- __ push(r0);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp1());
- Register scratch2 = ToRegister(instr->temp2());
-
- // Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ AllocateInNewSpace(size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- flags);
- } else {
- Register size = ToRegister(instr->size());
- __ AllocateInNewSpace(size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- flags);
- }
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ mov(result, Operand(Smi::FromInt(0)));
-
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ SmiTag(size, size);
- __ push(size);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
- AllocationSiteMode allocation_site_mode =
- instr->hydrogen()->allocation_site_mode();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type
- // different from the expected one. The check isn't necessary if the
- // boilerplate has already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
- // Load map into r2.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- __ cmp(r2, Operand(boilerplate_elements_kind));
- DeoptimizeIf(ne, instr->environment());
- }
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(r3, literals);
- __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
- __ Push(r3, r2, r1);
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode) {
- ASSERT(!source.is(r2));
- ASSERT(!result.is(r2));
-
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- object->map()->CanTrackAllocationSite();
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_size = has_elements ? elements->Size() : 0;
- int elements_offset = *offset + object_size;
- if (create_allocation_site_info) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- }
-
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ add(r2, result, Operand(elements_offset));
- } else {
- __ ldr(r2, FieldMemOperand(source, i));
- }
- __ str(r2, FieldMemOperand(result, object_offset + i));
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
- isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ add(r2, result, Operand(*offset));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- } else {
- __ mov(r2, Operand(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- }
- }
-
- // Build Allocation Site Info if desired
- if (create_allocation_site_info) {
- __ mov(r2, Operand(Handle<Map>(isolate()->heap()->
- allocation_site_info_map())));
- __ str(r2, FieldMemOperand(result, object_size));
- __ str(source, FieldMemOperand(result, object_size + kPointerSize));
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ ldr(r2, FieldMemOperand(source, i));
- __ str(r2, FieldMemOperand(result, elements_offset + i));
- }
-
- // Copy elements backing store content.
- int elements_length = has_elements ? elements->length() : 0;
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- // We only support little-endian mode...
- int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
- int32_t value_high = static_cast<int32_t>(value >> 32);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(r2, Operand(value_low));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ mov(r2, Operand(value_high));
- __ str(r2, FieldMemOperand(result, total_offset + 4));
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i), isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ add(r2, result, Operand(*offset));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- } else {
- __ mov(r2, Operand(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type
- // different from the expected one. The check isn't necessary if the
- // boilerplate has already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
- // Load map into r2.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- __ cmp(r2, Operand(boilerplate_elements_kind));
- DeoptimizeIf(ne, instr->environment());
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ push(r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset,
- instr->hydrogen()->allocation_site_mode());
- ASSERT_EQ(size, offset);
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- // Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(r3, literals);
- __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r1, Operand(constant_properties));
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- __ mov(r0, Operand(Smi::FromInt(flags)));
-
- // Pick the right runtime function or stub to call.
- int properties_count = constant_properties->length() / 2;
- if (instr->hydrogen()->depth() > 1) {
- __ Push(r3, r2, r1, r0);
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ Push(r3, r2, r1, r0);
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- } else {
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->value()).is(r0));
- __ push(r0);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- Label materialized;
- // Registers will be used as follows:
- // r7 = literals array.
- // r1 = regexp literal.
- // r0 = regexp literal clone.
- // r2 and r4-r6 are used as temporaries.
- int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(r7, instr->hydrogen()->literals());
- __ ldr(r1, FieldMemOperand(r7, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &materialized);
-
- // Create regexp literal using runtime function
- // Result will be in r0.
- __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r5, Operand(instr->hydrogen()->pattern()));
- __ mov(r4, Operand(instr->hydrogen()->flags()));
- __ Push(r7, r6, r5, r4);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(r1, r0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ Push(r1, r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(r1);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ ldr(r3, FieldMemOperand(r1, i));
- __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
- __ str(r3, FieldMemOperand(r0, i));
- __ str(r2, FieldMemOperand(r0, i + kPointerSize));
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
- __ str(r3, FieldMemOperand(r0, size - kPointerSize));
- }
-}
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- // Use the fast-case closure allocation code that allocates in new
- // space for nested functions that don't need their literals cloned.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
- __ mov(r1, Operand(shared_info));
- __ push(r1);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- __ mov(r2, Operand(shared_info));
- __ mov(r1, Operand(pretenure
- ? factory()->true_value()
- : factory()->false_value()));
- __ Push(cp, r2, r1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- Register input = ToRegister(instr->value());
- __ push(input);
- CallRuntime(Runtime::kTypeof, 1, instr);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
- input,
- instr->type_literal());
- if (final_branch_condition != kNoCondition) {
- EmitBranch(true_block, false_block, final_branch_condition);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
- Condition final_branch_condition = kNoCondition;
- Register scratch = scratch0();
- if (type_name->Equals(heap()->number_string())) {
- __ JumpIfSmi(input, true_label);
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(input, Operand(ip));
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->string_string())) {
- __ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
- __ b(ge, false_label);
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
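- // tst sets Z when the undetectable bit is clear, so eq answers true
- // only for genuine (non-undetectable) strings.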
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->boolean_string())) {
- __ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ b(eq, true_label);
- __ CompareRoot(input, Heap::kFalseValueRootIndex);
- final_branch_condition = eq;
-
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->undefined_string())) {
- __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
- __ b(eq, true_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- final_branch_condition = ne;
-
- } else if (type_name->Equals(heap()->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
- __ b(eq, true_label);
- __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
- final_branch_condition = eq;
-
- } else if (type_name->Equals(heap()->object_string())) {
- __ JumpIfSmi(input, false_label);
- if (!FLAG_harmony_typeof) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ b(eq, true_label);
- }
- if (FLAG_harmony_symbols) {
- __ CompareObjectType(input, input, scratch, SYMBOL_TYPE);
- __ b(eq, true_label);
- __ CompareInstanceType(input, scratch,
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- } else {
- __ CompareObjectType(input, input, scratch,
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- }
- __ b(lt, false_label);
- __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(gt, false_label);
- // Check for undetectable objects => false.
- __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsUndetectable));
- final_branch_condition = eq;
-
- } else {
- __ b(false_label);
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitIsConstructCall(temp1, scratch0());
- EmitBranch(true_block, false_block, eq);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- ASSERT(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &check_frame_marker);
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
- __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt() {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- // Block literal pool emission for duration of padding.
- Assembler::BlockConstPoolScope block_const_pool(masm());
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= Assembler::kInstrSize;
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
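
The nop loop keeps at least Deoptimizer::patch_size() bytes between consecutive lazy-deopt sites so the patcher can later overwrite a whole call sequence. The padding arithmetic in isolation, as a sketch:

    // Bytes of padding needed so current_pc lands at least patch_size
    // bytes past the previous lazy-deopt pc; emitted as nops above.
    int LazyDeoptPadding(int current_pc, int last_lazy_deopt_pc, int patch_size) {
      int needed = last_lazy_deopt_pc + patch_size - current_pc;
      return needed > 0 ? needed : 0;  // always a multiple of kInstrSize
    }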
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(al, instr->environment());
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Register object = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- Register strict = scratch0();
- __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
- __ Push(object, key, strict);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoIn(LIn* instr) {
- Register obj = ToRegister(instr->object());
- Register key = ToRegister(instr->key());
- __ Push(key, obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStackCheck* instr_;
- };
-
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &done);
- StackCheckStub stub;
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- EnsureSpaceForLazyDeopt();
- __ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- } else {
- ASSERT(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(lo, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting call and the safepoint in
- // the deferred code.
- }
-}
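
Both branches boil down to one unsigned comparison of sp against the stack limit loaded from the roots table; 'lo' takes the slow path and 'hs' skips it. In scalar form:

    #include <cstdint>
    // Sketch of the guard: true means call the stack-check slow path.
    bool NeedsStackGuard(uintptr_t sp, uintptr_t stack_limit) {
      return sp < stack_limit;  // unsigned compare, the 'lo' condition
    }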
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- DeoptimizeIf(eq, instr->environment());
-
- Register null_value = r5;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmp(r0, null_value);
- DeoptimizeIf(eq, instr->environment());
-
- __ tst(r0, Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr->environment());
-
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr->environment());
-
- Label use_cache, call_runtime;
- __ CheckEnumCache(null_value, &call_runtime);
-
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ b(&use_cache);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(r0);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r1, ip);
- DeoptimizeIf(ne, instr->environment());
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ cmp(result, Operand(Smi::FromInt(0)));
- __ b(ne, &load_cache);
- __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
- __ jmp(&done);
-
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ ldr(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
- __ ldr(result,
- FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- __ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- Register map = ToRegister(instr->map());
- __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- __ cmp(map, scratch0());
- DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- Label out_of_object, done;
- __ cmp(index, Operand::Zero());
- __ b(lt, &out_of_object);
-
- STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
- __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
-
- __ b(&done);
-
- __ bind(&out_of_object);
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- // The index equals the negated out-of-object property index plus 1.
- __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(result, FieldMemOperand(scratch,
- FixedArray::kHeaderSize - kPointerSize));
- __ bind(&done);
-}
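
The sign of the index selects the storage: non-negative indices address in-object fields, while a negative index n addresses slot -n-1 of the separate properties array. With plain arrays standing in for the tagged heap layout, the dispatch is:

    #include <cstdint>
    // Sketch of DoLoadFieldByIndex's addressing, minus smi tags and
    // heap-object headers.
    intptr_t LoadFieldByIndex(const intptr_t* in_object,
                              const intptr_t* properties,
                              intptr_t index) {
      return index >= 0 ? in_object[index] : properties[-index - 1];
    }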
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/arm/lithium-codegen-arm.h b/src/3rdparty/v8/src/arm/lithium-codegen-arm.h
deleted file mode 100644
index f1e3332..0000000
--- a/src/3rdparty/v8/src/arm/lithium-codegen-arm.h
+++ /dev/null
@@ -1,513 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
-#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
-
-#include "arm/lithium-arm.h"
-#include "arm/lithium-gap-resolver-arm.h"
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen BASE_EMBEDDED {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
- deoptimizations_(4, info->zone()),
- deopt_jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
- inlined_function_count_(0),
- scope_(info->scope()),
- status_(UNUSED),
- translations_(info->zone()),
- deferred_(8, info->zone()),
- osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
-
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
- bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- // Support for converting LOperands to assembler types.
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
-
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LOperand must be a double register.
- DwVfpRegister ToDoubleRegister(LOperand* op) const;
-
- // LOperand is loaded into dbl_scratch, unless already a double register.
- DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DwVfpRegister dbl_scratch);
- int ToInteger32(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
- // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
- MemOperand ToHighMemOperand(LOperand* op) const;
-
- bool IsInteger32(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredBinaryOpStub(LPointerMap* pointer_map,
- LOperand* left_argument,
- LOperand* right_argument,
- Token::Value op);
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
-
- void DoCheckMapCommon(Register map_reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- MemOperand PrepareKeyedOperand(Register key,
- Register base,
- bool key_is_constant,
- int constant_key,
- int element_size,
- int shift_size,
- int additional_index,
- int additional_offset);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
-
- LPlatformChunk* chunk() const { return chunk_; }
- Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
-
- Register scratch0() { return r9; }
- DwVfpRegister double_scratch0() { return kScratchDoubleReg; }
-
- int GetNextEmittedBlock(int block);
- LInstruction* GetNextInstruction();
-
- void EmitClassOfTest(Label* if_true,
- Label* if_false,
- Handle<String> class_name,
- Register input,
- Register temporary,
- Register temporary2);
-
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return info()->num_parameters(); }
-
- void Abort(const char* reason);
- void Comment(const char* format, ...);
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- // Code generation passes. Returns true if code generation should
- // continue.
- bool GeneratePrologue();
- bool GenerateBody();
- bool GenerateDeferredCode();
- bool GenerateDeoptJumpTable();
- bool GenerateSafepointTable();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(
- Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
-
- void CallCodeGeneric(
- Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
-
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr);
-
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr);
-
- enum R1State {
- R1_UNINITIALIZED,
- R1_CONTAINS_TARGET
- };
-
- // Generate a direct call to a known function. Expects the function
- // to be in r1.
- void CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- R1State r1_state);
-
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
-
- void AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
- void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- Register ToRegister(int index) const;
- DwVfpRegister ToDoubleRegister(int index) const;
-
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
-
- // Support for recording safepoint and position information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordPosition(int position);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitNumberUntagD(Register input,
- DwVfpRegister result,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
-
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string);
-
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
-
- // Emit optimized code for integer division.
- // Inputs are signed.
- // All registers are clobbered.
- // If 'remainder' is no_reg, it is not computed.
- void EmitSignedIntegerDivisionByConstant(Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment);
-
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
- : label(),
- address(entry),
- needs_frame(frame),
- is_lazy_deopt(is_lazy) { }
- Label label;
- Address address;
- bool needs_frame;
- bool is_lazy_deopt;
- };
-
- void EnsureSpaceForLazyDeopt();
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
- ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> deopt_jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
- int inlined_function_count_;
- Scope* const scope_;
- Status status_;
- TranslationBuffer translations_;
- ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
- int last_lazy_deopt_pc_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiles a set of parallel moves into a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope BASE_EMBEDDED {
- public:
- PushSafepointRegistersScope(LCodeGen* codegen,
- Safepoint::Kind kind)
- : codegen_(codegen) {
- ASSERT(codegen_->info()->is_calling());
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = kind;
-
- switch (codegen_->expected_safepoint_kind_) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PushSafepointRegisters();
- break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PushSafepointRegistersAndDoubles();
- break;
- default:
- UNREACHABLE();
- }
- }
-
- ~PushSafepointRegistersScope() {
- Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
- ASSERT((kind & Safepoint::kWithRegisters) != 0);
- switch (kind) {
- case Safepoint::kWithRegisters:
- codegen_->masm_->PopSafepointRegisters();
- break;
- case Safepoint::kWithRegistersAndDoubles:
- codegen_->masm_->PopSafepointRegistersAndDoubles();
- break;
- default:
- UNREACHABLE();
- }
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() { }
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_LITHIUM_CODEGEN_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc b/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc
deleted file mode 100644
index 4df1338..0000000
--- a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "arm/lithium-gap-resolver-arm.h"
-#include "arm/lithium-codegen-arm.h"
-
-namespace v8 {
-namespace internal {
-
-static const Register kSavedValueRegister = { 9 };
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
- saved_destination_(NULL) { }
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- root_index_ = i; // Any cycle is found by reaching this move again.
- PerformMove(i);
- if (in_cycle_) {
- RestoreValue();
- }
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- ASSERT(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph.
-
- // When doing a depth-first traversal of moves, we can only find a cycle
- // by encountering the starting move again. So by spilling the source of
- // the starting move, we break the cycle. All moves are then unblocked,
- // and the starting move is completed by writing the spilled value to
- // its destination. All other moves from the spilled source have been
- // completed prior to breaking the cycle.
- // An additional complication is that moves to MemOperands with large
- // offsets (more than 1K or 4K) require us to spill this spilled value to
- // the stack, to free up the register.
- ASSERT(!moves_[index].IsPending());
- ASSERT(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack-allocated local. Multiple moves can
- // be pending because this function is recursive.
- ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- PerformMove(i);
- // If there is a blocking, pending move, it must be moves_[root_index_],
- // and all other moves with the same source as moves_[root_index_] are
- // successfully executed (because they are cycle-free) by this loop.
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // The move may be blocked on a pending move, which must be the starting move.
- // In this case, we have a cycle, and we save the source of this move to
- // a scratch register to break it.
- LMoveOperands other_move = moves_[root_index_];
- if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
- BreakCycle(index);
- return;
- }
-
- // This move is no longer blocked.
- EmitMove(index);
-}
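
Stripped of operand kinds, PerformMove is a depth-first search: every not-yet-done move that reads this move's destination is resolved first, and meeting a still-pending move again is the only way a cycle can appear. A toy model of that control flow (types hypothetical; the real code spills via BreakCycle when it finds the pending root blocked):

    #include <vector>
    struct ToyMove { int src, dst; bool pending = false, done = false; };
    void Perform(std::vector<ToyMove>& moves, size_t i) {
      moves[i].pending = true;
      for (size_t j = 0; j < moves.size(); ++j) {
        // A move reading our destination blocks us; resolve it first.
        if (!moves[j].done && !moves[j].pending &&
            moves[j].src == moves[i].dst) {
          Perform(moves, j);
        }
      }
      moves[i].pending = false;  // a blocker left pending here is a cycle
      moves[i].done = true;      // emit the move
    }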
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::BreakCycle(int index) {
- // We save in a register the value that should end up in the source of
- // moves_[root_index]. After performing all moves in the tree rooted
- // in that move, we save the value to that source.
- ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
- ASSERT(!in_cycle_);
- in_cycle_ = true;
- LOperand* source = moves_[index].source();
- saved_destination_ = moves_[index].destination();
- if (source->IsRegister()) {
- __ mov(kSavedValueRegister, cgen_->ToRegister(source));
- } else if (source->IsStackSlot()) {
- __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
- } else if (source->IsDoubleRegister()) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
- } else if (source->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(VFP2);
- __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
- } else {
- UNREACHABLE();
- }
- // This move will be done by restoring the saved value to the destination.
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::RestoreValue() {
- ASSERT(in_cycle_);
- ASSERT(saved_destination_ != NULL);
-
- // Spilled value is in kSavedValueRegister or kScratchDoubleReg.
- if (saved_destination_->IsRegister()) {
- __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
- } else if (saved_destination_->IsStackSlot()) {
- __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
- } else if (saved_destination_->IsDoubleRegister()) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
- } else if (saved_destination_->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(VFP2);
- __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
- } else {
- UNREACHABLE();
- }
-
- in_cycle_ = false;
- saved_destination_ = NULL;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
-
- if (source->IsRegister()) {
- Register source_register = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- __ mov(cgen_->ToRegister(destination), source_register);
- } else {
- ASSERT(destination->IsStackSlot());
- __ str(source_register, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsRegister()) {
- __ ldr(cgen_->ToRegister(destination), source_operand);
- } else {
- ASSERT(destination->IsStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- if (!destination_operand.OffsetIsUint12Encodable()) {
- CpuFeatures::Scope scope(VFP2);
- // ip is overwritten while saving the value to the destination.
- // Therefore we can't use ip. It is OK if the read from the source
- // destroys ip, since that happens before the value is read.
- __ vldr(kScratchDoubleReg.low(), source_operand);
- __ vstr(kScratchDoubleReg.low(), destination_operand);
- } else {
- __ ldr(ip, source_operand);
- __ str(ip, destination_operand);
- }
- } else {
- __ ldr(kSavedValueRegister, source_operand);
- __ str(kSavedValueRegister, destination_operand);
- }
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32(constant_source)) {
- __ mov(dst, Operand(cgen_->ToInteger32(constant_source)));
- } else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
- }
- } else {
- ASSERT(destination->IsStackSlot());
- ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
- if (cgen_->IsInteger32(constant_source)) {
- __ mov(kSavedValueRegister,
- Operand(cgen_->ToInteger32(constant_source)));
- } else {
- __ LoadObject(kSavedValueRegister,
- cgen_->ToHandle(constant_source));
- }
- __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleRegister()) {
- CpuFeatures::Scope scope(VFP2);
- DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ vmov(cgen_->ToDoubleRegister(destination), source_register);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- __ vstr(source_register, cgen_->ToMemOperand(destination));
- }
-
- } else if (source->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(VFP2);
- MemOperand source_operand = cgen_->ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- if (in_cycle_) {
- // kScratchDoubleReg was used to break the cycle,
- // but kSavedValueRegister is free.
- MemOperand source_high_operand =
- cgen_->ToHighMemOperand(source);
- MemOperand destination_high_operand =
- cgen_->ToHighMemOperand(destination);
- __ ldr(kSavedValueRegister, source_operand);
- __ str(kSavedValueRegister, destination_operand);
- __ ldr(kSavedValueRegister, source_high_operand);
- __ str(kSavedValueRegister, destination_high_operand);
- } else {
- __ vldr(kScratchDoubleReg, source_operand);
- __ vstr(kScratchDoubleReg, destination_operand);
- }
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-
-#undef __
-
-} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.h b/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.h
deleted file mode 100644
index 9dd09c8..0000000
--- a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
-
-#include "v8.h"
-
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // If a cycle is found in the series of moves, save the blocking value to
- // a scratch register. The cycle must be found by hitting the root of the
- // depth-first search.
- void BreakCycle(int index);
-
- // After a cycle has been resolved, restore the value from the scratch
- // register to its proper destination.
- void RestoreValue();
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- int root_index_;
- bool in_cycle_;
- LOperand* saved_destination_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/macro-assembler-arm.cc b/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
deleted file mode 100644
index 326f555..0000000
--- a/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
+++ /dev/null
@@ -1,4012 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <limits.h> // For LONG_MIN, LONG_MAX.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "debug.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
- : Assembler(arg_isolate, buffer, size),
- generating_stub_(false),
- allow_stub_calls_(true),
- has_frame_(false) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
- }
-}
-
-
-// We always generate arm code, never thumb code, even if V8 is compiled to
-// thumb, so we require inter-working support
-#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
-#error "flag -mthumb-interwork missing"
-#endif
-
-
-// We do not support thumb inter-working with an arm architecture not supporting
-// the blx instruction (below v5t). If you know what CPU you are compiling for
-// you can use -march=armv7 or similar.
-#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
-# error "For thumb inter-working we require an architecture which supports blx"
-#endif
-
-
-// Using bx does not yield better code, so use it only when required
-#if defined(USE_THUMB_INTERWORK)
-#define USE_BX 1
-#endif
-
-
-void MacroAssembler::Jump(Register target, Condition cond) {
-#if USE_BX
- bx(target, cond);
-#else
- mov(pc, Operand(target), LeaveCC, cond);
-#endif
-}
-
-
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
- Condition cond) {
-#if USE_BX
- mov(ip, Operand(target, rmode));
- bx(ip, cond);
-#else
- mov(pc, Operand(target, rmode), LeaveCC, cond);
-#endif
-}
-
-
-void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
- Condition cond) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
-}
-
-
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- // 'code' is always generated ARM code, never THUMB code
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-}
-
-
-int MacroAssembler::CallSize(Register target, Condition cond) {
-#ifdef USE_BLX
- return kInstrSize;
-#else
- return 2 * kInstrSize;
-#endif
-}
-
-
-void MacroAssembler::Call(Register target, Condition cond) {
- // Block constant pool for the call instruction sequence.
- BlockConstPoolScope block_const_pool(this);
- Label start;
- bind(&start);
-#ifdef USE_BLX
- blx(target, cond);
-#else
- // set lr for return at current pc + 8
- mov(lr, Operand(pc), LeaveCC, cond);
- mov(pc, Operand(target), LeaveCC, cond);
-#endif
- ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
-}
-
-
-int MacroAssembler::CallSize(
- Address target, RelocInfo::Mode rmode, Condition cond) {
- int size = 2 * kInstrSize;
- Instr mov_instr = cond | MOV | LeaveCC;
- intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
- size += kInstrSize;
- }
- return size;
-}
-
-
-int MacroAssembler::CallSizeNotPredictableCodeSize(
- Address target, RelocInfo::Mode rmode, Condition cond) {
- int size = 2 * kInstrSize;
- Instr mov_instr = cond | MOV | LeaveCC;
- intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
- size += kInstrSize;
- }
- return size;
-}
-
-
-void MacroAssembler::Call(Address target,
- RelocInfo::Mode rmode,
- Condition cond,
- TargetAddressStorageMode mode) {
- // Block constant pool for the call instruction sequence.
- BlockConstPoolScope block_const_pool(this);
- Label start;
- bind(&start);
-
- bool old_predictable_code_size = predictable_code_size();
- if (mode == NEVER_INLINE_TARGET_ADDRESS) {
- set_predictable_code_size(true);
- }
-
-#ifdef USE_BLX
- // Call sequence on V7 or later may be:
- // movw ip, #... @ call address low 16
- // movt ip, #... @ call address high 16
- // blx ip
- // @ return address
- // Or, for pre-V7 cores or for values that may be back-patched
- // to avoid ICache flushes:
- // ldr ip, [pc, #...] @ call address
- // blx ip
- // @ return address
-
- // Statement positions are expected to be recorded when the target
- // address is loaded. The mov method will automatically record
- // positions when pc is the target. Since this is not the case here,
- // we have to do it explicitly.
- positions_recorder()->WriteRecordedPositions();
-
- mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
- blx(ip, cond);
-
-#else
- // Set lr for return at current pc + 8.
- mov(lr, Operand(pc), LeaveCC, cond);
- // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
- mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
-#endif
- ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
- if (mode == NEVER_INLINE_TARGET_ADDRESS) {
- set_predictable_code_size(old_predictable_code_size);
- }
-}
-
-
-int MacroAssembler::CallSize(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id,
- Condition cond) {
- return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
-}
-
-
-void MacroAssembler::Call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id,
- Condition cond,
- TargetAddressStorageMode mode) {
- Label start;
- bind(&start);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
- SetRecordedAstId(ast_id);
- rmode = RelocInfo::CODE_TARGET_WITH_ID;
- }
- // 'code' is always generated ARM code, never THUMB code
- Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
-}
-
-
-void MacroAssembler::Ret(Condition cond) {
-#if USE_BX
- bx(lr, cond);
-#else
- mov(pc, Operand(lr), LeaveCC, cond);
-#endif
-}
-
-
-void MacroAssembler::Drop(int count, Condition cond) {
- if (count > 0) {
- add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
- }
-}
-
-
-void MacroAssembler::Ret(int drop, Condition cond) {
- Drop(drop, cond);
- Ret(cond);
-}
-
-
-void MacroAssembler::Swap(Register reg1,
- Register reg2,
- Register scratch,
- Condition cond) {
- if (scratch.is(no_reg)) {
- eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
- eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
- eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
- } else {
- mov(scratch, reg1, LeaveCC, cond);
- mov(reg1, reg2, LeaveCC, cond);
- mov(reg2, scratch, LeaveCC, cond);
- }
-}
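
When no scratch register is available, Swap uses the classic three-XOR exchange; it must never see two names for the same storage, or the value collapses to zero. As plain C++:

    #include <cstdint>
    // Sketch of the eor/eor/eor sequence above; requires &a != &b.
    void XorSwap(uint32_t& a, uint32_t& b) {
      a ^= b;
      b ^= a;  // b now holds the original a
      a ^= b;  // a now holds the original b
    }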
-
-
-void MacroAssembler::Call(Label* target) {
- bl(target);
-}
-
-
-void MacroAssembler::Push(Handle<Object> handle) {
- mov(ip, Operand(handle));
- push(ip);
-}
-
-
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, Operand(value));
-}
-
-
-void MacroAssembler::Move(Register dst, Register src, Condition cond) {
- if (!dst.is(src)) {
- mov(dst, src, LeaveCC, cond);
- }
-}
-
-
-void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatures::Scope scope(VFP2);
- if (!dst.is(src)) {
- vmov(dst, src);
- }
-}
-
-
-void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
- Condition cond) {
- if (!src2.is_reg() &&
- !src2.must_output_reloc_info(this) &&
- src2.immediate() == 0) {
- mov(dst, Operand::Zero(), LeaveCC, cond);
- } else if (!src2.is_single_instruction(this) &&
- !src2.must_output_reloc_info(this) &&
- CpuFeatures::IsSupported(ARMv7) &&
- IsPowerOf2(src2.immediate() + 1)) {
- ubfx(dst, src1, 0,
- WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
- } else {
- and_(dst, src1, src2, LeaveCC, cond);
- }
-}
-
-
-void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
- Condition cond) {
- ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- and_(dst, src1, Operand(mask), LeaveCC, cond);
- if (lsb != 0) {
- mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
- }
- } else {
- ubfx(dst, src1, lsb, width, cond);
- }
-}
-
-
-void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
- Condition cond) {
- ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- and_(dst, src1, Operand(mask), LeaveCC, cond);
- int shift_up = 32 - lsb - width;
- int shift_down = lsb + shift_up;
- if (shift_up != 0) {
- mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
- }
- if (shift_down != 0) {
- mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
- }
- } else {
- sbfx(dst, src1, lsb, width, cond);
- }
-}
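
Without ARMv7 bit-field instructions (or when code size must stay predictable), Ubfx and Sbfx are emulated with a mask and shifts. Their scalar equivalents, assuming 0 < width and lsb + width <= 32, look like this (a sketch, not V8 code):

    #include <cstdint>
    // Unsigned extract: width bits starting at lsb, like ubfx.
    uint32_t UbfxEquiv(uint32_t src, int lsb, int width) {
      uint32_t mask = width == 32 ? ~0u : (1u << width) - 1;
      return (src >> lsb) & mask;
    }
    // Signed extract: shift the field to the top, then arithmetic-shift
    // back down to sign-extend, mirroring the LSL/ASR pair above.
    int32_t SbfxEquiv(uint32_t src, int lsb, int width) {
      int shift_up = 32 - lsb - width;
      return static_cast<int32_t>(src << shift_up) >> (shift_up + lsb);
    }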
-
-
-void MacroAssembler::Bfi(Register dst,
- Register src,
- Register scratch,
- int lsb,
- int width,
- Condition cond) {
- ASSERT(0 <= lsb && lsb < 32);
- ASSERT(0 <= width && width < 32);
- ASSERT(lsb + width < 32);
- ASSERT(!scratch.is(dst));
- if (width == 0) return;
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- bic(dst, dst, Operand(mask));
- and_(scratch, src, Operand((1 << width) - 1));
- mov(scratch, Operand(scratch, LSL, lsb));
- orr(dst, dst, scratch);
- } else {
- bfi(dst, src, lsb, width, cond);
- }
-}
-
-
-void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
- Condition cond) {
- ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
- bic(dst, src, Operand(mask));
- } else {
- Move(dst, src, cond);
- bfc(dst, lsb, width, cond);
- }
-}
-
-
-void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
- Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- ASSERT(!dst.is(pc) && !src.rm().is(pc));
- ASSERT((satpos >= 0) && (satpos <= 31));
-
- // These asserts are required to ensure compatibility with the ARMv7
- // implementation.
- ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
- ASSERT(src.rs().is(no_reg));
-
- Label done;
- int satval = (1 << satpos) - 1;
-
- if (cond != al) {
- b(NegateCondition(cond), &done); // Skip saturate if !condition.
- }
- if (!(src.is_reg() && dst.is(src.rm()))) {
- mov(dst, src);
- }
- tst(dst, Operand(~satval));
- b(eq, &done);
- mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
- mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
- bind(&done);
- } else {
- usat(dst, satpos, src, cond);
- }
-}
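
The fallback (a tst followed by mi/pl conditional moves) is just an unsigned saturate: clamp a signed value into [0, 2^satpos - 1]. As a scalar sketch, assuming 0 <= satpos < 31 so the arithmetic cannot overflow:

    #include <cstdint>
    // Scalar meaning of usat #satpos.
    int32_t UsatEquiv(int32_t x, int satpos) {
      int32_t satval = (1 << satpos) - 1;
      if (x < 0) return 0;              // mi: negative saturates to 0
      return x > satval ? satval : x;   // pl overflow saturates to satval
    }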
-
-
-void MacroAssembler::LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond) {
- if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- !Heap::RootCanBeWrittenAfterInitialization(index) &&
- !predictable_code_size()) {
- Handle<Object> root(isolate()->heap()->roots_array_start()[index],
- isolate());
- if (!isolate()->heap()->InNewSpace(*root)) {
- // The CPU supports fast immediate values, and this root will never
- // change. We will load it as a relocatable immediate value.
- mov(destination, Operand(root), LeaveCC, cond);
- return;
- }
- }
- ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
-}
-
-
-void MacroAssembler::StoreRoot(Register source,
- Heap::RootListIndex index,
- Condition cond) {
- str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
-}
-
-
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- mov(result, Operand(cell));
- ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
- } else {
- mov(result, Operand(object));
- }
-}
-
-
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cond,
- Label* branch) {
- ASSERT(cond == eq || cond == ne);
- and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
- cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
- b(cond, branch);
-}
-
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done);
- }
-
- // Although the object register is tagged, the offset is relative to the start
- // of the object, so the offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
-
- add(dst, object, Operand(offset - kHeapObjectTag));
- if (emit_debug_code()) {
- Label ok;
- tst(dst, Operand((1 << kPointerSizeLog2) - 1));
- b(eq, &ok);
- stop("Unaligned cell in write barrier");
- bind(&ok);
- }
-
- RecordWrite(object,
- dst,
- value,
- lr_status,
- save_fp,
- remembered_set_action,
- OMIT_SMI_CHECK);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
- mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
- }
-}
-
-
-// Will clobber 4 registers: object, address, scratch, ip. The
-// register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are cp.
- ASSERT(!address.is(cp) && !value.is(cp));
-
- if (emit_debug_code()) {
- ldr(ip, MemOperand(address));
- cmp(ip, value);
- Check(eq, "Wrong address or value passed to RecordWrite");
- }
-
- Label done;
-
- if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
- tst(value, Operand(kSmiTagMask));
- b(eq, &done);
- }
-
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- eq,
- &done);
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- eq,
- &done);
-
- // Record the actual write.
- if (lr_status == kLRHasNotBeenSaved) {
- push(lr);
- }
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
- CallStub(&stub);
- if (lr_status == kLRHasNotBeenSaved) {
- pop(lr);
- }
-
- bind(&done);
-
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
- mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
- }
-}
-
-
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register address,
- Register scratch,
- SaveFPRegsMode fp_mode,
- RememberedSetFinalAction and_then) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok);
- stop("Remembered set pointer is in new space");
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- mov(ip, Operand(store_buffer));
- ldr(scratch, MemOperand(ip));
- // Store pointer to buffer and increment buffer top.
- str(address, MemOperand(scratch, kPointerSize, PostIndex));
- // Write back new top of buffer.
- str(scratch, MemOperand(ip));
- // Check for end of buffer and call the overflow stub if it has been reached.
- tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
- if (and_then == kFallThroughAtEnd) {
- b(eq, &done);
- } else {
- ASSERT(and_then == kReturnAtEnd);
- Ret(eq);
- }
- push(lr);
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(fp_mode);
- CallStub(&store_buffer_overflow);
- pop(lr);
- bind(&done);
- if (and_then == kReturnAtEnd) {
- Ret();
- }
-}
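-
-// The fast path above appends one slot address to the store buffer and then
-// tests the overflow bit encoded in the new top pointer; roughly (a sketch):
-//
-//   *store_buffer_top++ = slot_address;
-//   if (reinterpret_cast<uintptr_t>(store_buffer_top) &
-//       StoreBuffer::kStoreBufferOverflowBit) {
-//     CallStoreBufferOverflowStub();  // illustrative name
-//   }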
-
-
-// Push and pop all registers that can hold pointers.
-void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of contiguous register values starting with r0:
- ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
- // Safepoints expect a block of kNumSafepointRegisters values on the
- // stack, so adjust the stack for unsaved registers.
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ASSERT(num_unsaved >= 0);
- sub(sp, sp, Operand(num_unsaved * kPointerSize));
- stm(db_w, sp, kSafepointSavedRegisters);
-}
-
-
-void MacroAssembler::PopSafepointRegisters() {
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ldm(ia_w, sp, kSafepointSavedRegisters);
- add(sp, sp, Operand(num_unsaved * kPointerSize));
-}
-
-
-void MacroAssembler::PushSafepointRegistersAndDoubles() {
- // Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
- PushSafepointRegisters();
- sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
- kDoubleSize));
- for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
- vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
- }
-}
-
-
-void MacroAssembler::PopSafepointRegistersAndDoubles() {
- // Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
- for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
- vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
- }
- add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
- kDoubleSize));
- PopSafepointRegisters();
-}
-
-void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
- Register dst) {
- str(src, SafepointRegistersAndDoublesSlot(dst));
-}
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
- str(src, SafepointRegisterSlot(dst));
-}
-
-
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- ldr(dst, SafepointRegisterSlot(src));
-}
-
-
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the highest encoding,
- // which means that the lowest encodings are closest to the stack pointer.
- ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
- return reg_code;
-}
-
-
-MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
- return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
-}
-
-
-MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
- // Number of d-regs not known at snapshot time.
- ASSERT(!Serializer::enabled());
- // General purpose registers are pushed last on the stack.
- int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
- int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
- return MemOperand(sp, doubles_size + register_offset);
-}
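-
-// Example: with N allocatable d-registers, the slot for a core register with
-// code c is at sp + N * kDoubleSize + c * kPointerSize, since the doubles
-// block sits below the block of general-purpose registers.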
-
-
-void MacroAssembler::Ldrd(Register dst1, Register dst2,
- const MemOperand& src, Condition cond) {
- ASSERT(src.rm().is(no_reg));
- ASSERT(!dst1.is(lr)); // r14.
- ASSERT_EQ(0, dst1.code() % 2);
- ASSERT_EQ(dst1.code() + 1, dst2.code());
-
- // V8 does not use this addressing mode, so the fallback code
- // below doesn't support it yet.
- ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
-
- // Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
- CpuFeatures::Scope scope(ARMv7);
- ldrd(dst1, dst2, src, cond);
- } else {
- if ((src.am() == Offset) || (src.am() == NegOffset)) {
- MemOperand src2(src);
- src2.set_offset(src2.offset() + 4);
- if (dst1.is(src.rn())) {
- ldr(dst2, src2, cond);
- ldr(dst1, src, cond);
- } else {
- ldr(dst1, src, cond);
- ldr(dst2, src2, cond);
- }
- } else { // PostIndex or NegPostIndex.
- ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
- if (dst1.is(src.rn())) {
- ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
- ldr(dst1, src, cond);
- } else {
- MemOperand src2(src);
- src2.set_offset(src2.offset() - 4);
- ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
- ldr(dst2, src2, cond);
- }
- }
- }
-}
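-
-// Example expansion when ldrd is unavailable, for an Offset-mode operand
-// [rn, #off] with dst1 distinct from rn (register names are illustrative):
-//
-//   ldr r4, [rn, #off]       @ dst1 <- low word
-//   ldr r5, [rn, #off + 4]   @ dst2 <- high word
-//
-// When dst1 aliases rn, the two loads are issued in the opposite order so
-// the base register is not clobbered before the second load.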
-
-
-void MacroAssembler::Strd(Register src1, Register src2,
- const MemOperand& dst, Condition cond) {
- ASSERT(dst.rm().is(no_reg));
- ASSERT(!src1.is(lr)); // r14.
- ASSERT_EQ(0, src1.code() % 2);
- ASSERT_EQ(src1.code() + 1, src2.code());
-
- // V8 does not use this addressing mode, so the fallback code
- // below doesn't support it yet.
- ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
-
- // Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
- CpuFeatures::Scope scope(ARMv7);
- strd(src1, src2, dst, cond);
- } else {
- MemOperand dst2(dst);
- if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
- dst2.set_offset(dst2.offset() + 4);
- str(src1, dst, cond);
- str(src2, dst2, cond);
- } else { // PostIndex or NegPostIndex.
- ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
- dst2.set_offset(dst2.offset() - 4);
- str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
- str(src2, dst2, cond);
- }
- }
-}
-
-
-void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Compare and move FPSCR flags to the normal condition flags.
- VFPCompareAndLoadFlags(src1, src2, pc, cond);
-}
-
-void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
- const double src2,
- const Condition cond) {
- // Compare and move FPSCR flags to the normal condition flags.
- VFPCompareAndLoadFlags(src1, src2, pc, cond);
-}
-
-
-void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Register fpscr_flags,
- const Condition cond) {
- // Compare and load FPSCR.
- vcmp(src1, src2, cond);
- vmrs(fpscr_flags, cond);
-}
-
-void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const double src2,
- const Register fpscr_flags,
- const Condition cond) {
- // Compare and load FPSCR.
- vcmp(src1, src2, cond);
- vmrs(fpscr_flags, cond);
-}
-
-void MacroAssembler::Vmov(const DwVfpRegister dst,
- const double imm,
- const Register scratch) {
- ASSERT(CpuFeatures::IsEnabled(VFP2));
- static const DoubleRepresentation minus_zero(-0.0);
- static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value(imm);
- // Handle special values first.
- if (value.bits == zero.bits) {
- vmov(dst, kDoubleRegZero);
- } else if (value.bits == minus_zero.bits) {
- vneg(dst, kDoubleRegZero);
- } else {
- vmov(dst, imm, scratch);
- }
-}
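-
-// Note: DoubleRepresentation compares raw bit patterns, so -0.0
-// (0x8000000000000000) is distinguished from +0.0 (0x0000000000000000) even
-// though -0.0 == 0.0 under C++ floating-point comparison; hence the separate
-// vneg path above.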
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- // r0-r3: preserved
- stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- mov(ip, Operand(Smi::FromInt(type)));
- push(ip);
- mov(ip, Operand(CodeObject()));
- push(ip);
- add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- // r0: preserved
- // r1: preserved
- // r2: preserved
-
- // Drop the execution stack down to the frame pointer and restore
- // the caller frame pointer and return address.
- mov(sp, fp);
- ldm(ia_w, sp, fp.bit() | lr.bit());
-}
-
-
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
- // Set up the frame structure on the stack.
- ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
- ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
- ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- Push(lr, fp);
- mov(fp, Operand(sp)); // Set up new frame pointer.
- // Reserve room for saved entry sp and code object.
- sub(sp, sp, Operand(2 * kPointerSize));
- if (emit_debug_code()) {
- mov(ip, Operand::Zero());
- str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
- }
- mov(ip, Operand(CodeObject()));
- str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
-
- // Save the frame pointer and the context in top.
- mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- str(fp, MemOperand(ip));
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- str(cp, MemOperand(ip));
-
- // Optionally save all double registers.
- if (save_doubles) {
- CpuFeatures::Scope scope(VFP2);
- // Check CPU flags for number of registers, setting the Z condition flag.
- CheckFor32DRegs(ip);
-
- // Push registers d0-d15, and possibly d16-d31, on the stack.
- // If d16-d31 are not pushed, decrease the stack pointer instead.
- vstm(db_w, sp, d16, d31, ne);
- sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
- vstm(db_w, sp, d0, d15);
- // Note that d0 will be accessible at
- // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
- // since the sp slot and code slot were pushed after the fp.
- }
-
- // Reserve room for the return address and the stack space, and align the
- // frame in preparation for calling the runtime function.
- const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
- if (frame_alignment > 0) {
- ASSERT(IsPowerOf2(frame_alignment));
- and_(sp, sp, Operand(-frame_alignment));
- }
-
- // Set the exit frame sp value to point just before the return address
- // location.
- add(ip, sp, Operand(kPointerSize));
- str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
-}
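-
-// Resulting exit frame layout relative to fp (a sketch, assuming
-// kSPOffset == -1 * kPointerSize and kCodeOffset == -2 * kPointerSize;
-// higher addresses first, before the spill area is reserved):
-//
-//   fp + 8 : caller's sp (kCallerSPDisplacement)
-//   fp + 4 : caller's pc, i.e. saved lr (kCallerPCOffset)
-//   fp + 0 : caller's fp (kCallerFPOffset)
-//   fp - 4 : saved entry sp slot (kSPOffset)
-//   fp - 8 : code object (kCodeOffset)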
-
-
-void MacroAssembler::InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2) {
- mov(scratch1, Operand(length, LSL, kSmiTagSize));
- LoadRoot(scratch2, map_index);
- str(scratch1, FieldMemOperand(string, String::kLengthOffset));
- mov(scratch1, Operand(String::kEmptyHashField));
- str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
- str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
-int MacroAssembler::ActivationFrameAlignment() {
-#if defined(V8_HOST_ARCH_ARM)
- // Running on the real platform. Use the alignment as mandated by the local
- // environment.
- // Note: This will break if we ever start generating snapshots on one ARM
- // platform for another ARM platform with a different alignment.
- return OS::ActivationFrameAlignment();
-#else // defined(V8_HOST_ARCH_ARM)
- // If we are using the simulator then we should always align to the expected
- // alignment. As the simulator is used to generate snapshots we do not know
- // if the target platform will need alignment, so this is controlled from a
- // flag.
- return FLAG_sim_stack_alignment;
-#endif // defined(V8_HOST_ARCH_ARM)
-}
-
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count) {
- // Optionally restore all double registers.
- if (save_doubles) {
- CpuFeatures::Scope scope(VFP2);
- // Calculate the stack location of the saved doubles and restore them.
- const int offset = 2 * kPointerSize;
- sub(r3, fp,
- Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
-
- // Check CPU flags for number of registers, setting the Z condition flag.
- CheckFor32DRegs(ip);
-
- // Pop registers d0-d15, and possibly d16-d31, from r3.
- // If d16-d31 are not popped, increase r3 instead.
- vldm(ia_w, r3, d0, d15);
- vldm(ia_w, r3, d16, d31, ne);
- add(r3, r3, Operand(16 * kDoubleSize), LeaveCC, eq);
- }
-
- // Clear top frame.
- mov(r3, Operand::Zero());
- mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- str(r3, MemOperand(ip));
-
- // Restore current context from top and clear it in debug mode.
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- ldr(cp, MemOperand(ip));
-#ifdef DEBUG
- str(r3, MemOperand(ip));
-#endif
-
- // Tear down the exit frame, pop the arguments, and return.
- mov(sp, Operand(fp));
- ldm(ia_w, sp, fp.bit() | lr.bit());
- if (argument_count.is_valid()) {
- add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
- }
-}
-
-void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- if (use_eabi_hardfloat()) {
- Move(dst, d0);
- } else {
- vmov(dst, r0, r1);
- }
-}
-
-
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be r5 to
- // follow the calling convention which requires the call type to be
- // in r5.
- ASSERT(dst.is(r5));
- if (call_kind == CALL_AS_FUNCTION) {
- mov(dst, Operand(Smi::FromInt(1)));
- } else {
- mov(dst, Operand(Smi::FromInt(0)));
- }
-}
-
-
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- bool definitely_matches = false;
- *definitely_mismatches = false;
- Label regular_invoke;
-
- // Check whether the expected and actual argument counts match. If not,
- // set up the registers according to the contract with
- // ArgumentsAdaptorTrampoline:
- // r0: actual arguments count
- // r1: function (passed through to callee)
- // r2: expected arguments count
- // r3: callee code entry
-
- // The code below is made a lot easier because the calling code already sets
- // up actual and expected registers according to the contract if values are
- // passed in registers.
- ASSERT(actual.is_immediate() || actual.reg().is(r0));
- ASSERT(expected.is_immediate() || expected.reg().is(r2));
- ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
-
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- mov(r0, Operand(actual.immediate()));
- const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- if (expected.immediate() == sentinel) {
- // Don't worry about adapting arguments for builtins that
- // don't want that done. Skip the adaptation code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- *definitely_mismatches = true;
- mov(r2, Operand(expected.immediate()));
- }
- }
- } else {
- if (actual.is_immediate()) {
- cmp(expected.reg(), Operand(actual.immediate()));
- b(eq, &regular_invoke);
- mov(r0, Operand(actual.immediate()));
- } else {
- cmp(expected.reg(), Operand(actual.reg()));
- b(eq, &regular_invoke);
- }
- }
-
- if (!definitely_matches) {
- if (!code_constant.is_null()) {
- mov(r3, Operand(code_constant));
- add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
-
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(r5, call_kind);
- Call(adaptor);
- call_wrapper.AfterCall();
- if (!*definitely_mismatches) {
- b(done);
- }
- } else {
- SetCallKind(r5, call_kind);
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&regular_invoke);
- }
-}
-
-
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag,
- call_wrapper, call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(r5, call_kind);
- Call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(r5, call_kind);
- Jump(code);
- }
-
- // Continue here if InvokePrologue handled the invocation itself via the
- // arguments adaptor because of mismatched parameter counts.
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, no_reg,
- &done, &definitely_mismatches, flag,
- NullCallWrapper(), call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- SetCallKind(r5, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(r5, call_kind);
- Jump(code, rmode);
- }
-
- // Continue here if InvokePrologue handled the invocation itself via the
- // arguments adaptor because of mismatched parameter counts.
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeFunction(Register fun,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Contract with called JS functions requires that function is passed in r1.
- ASSERT(fun.is(r1));
-
- Register expected_reg = r2;
- Register code_reg = r3;
-
- ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- ldr(expected_reg,
- FieldMemOperand(code_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
- mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
- ldr(code_reg,
- FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-
- ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Get the function and setup the context.
- LoadHeapObject(r1, function);
- ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- ParameterCount expected(function->shared()->formal_parameter_count());
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- b(lt, fail);
- cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- b(gt, fail);
-}
-
-
-void MacroAssembler::IsObjectJSStringType(Register object,
- Register scratch,
- Label* fail) {
- ASSERT(kNotStringTag != 0);
-
- ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- tst(scratch, Operand(kIsNotStringMask));
- b(ne, fail);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::DebugBreak() {
- mov(r0, Operand::Zero());
- mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
- CEntryStub ces(1);
- ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
-}
-#endif
-
-
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // For the JSEntry handler, we must preserve r0-r4; r5-r7 are available.
- // We will build up the handler from the bottom by pushing on the stack.
- // Set up the code object (r5) and the state (r6) for pushing.
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- mov(r5, Operand(CodeObject()));
- mov(r6, Operand(state));
-
- // Push the frame pointer, context, state, and code object.
- if (kind == StackHandler::JS_ENTRY) {
- mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
- mov(ip, Operand::Zero()); // NULL frame pointer.
- stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
- } else {
- stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
- }
-
- // Link the current handler as the next handler.
- mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(r5, MemOperand(r6));
- push(r5);
- // Set this new handler as the current one.
- str(sp, MemOperand(r6));
-}
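-
-// Resulting handler block on the stack (a sketch; offsets follow the
-// StackHandlerConstants asserted above):
-//
-//   sp + 16 : fp            (0 for JS_ENTRY handlers)
-//   sp + 12 : context       (Smi 0 for JS_ENTRY handlers)
-//   sp +  8 : state         (kind and index bit fields)
-//   sp +  4 : code object
-//   sp +  0 : next handler  (previous contents of Isolate::kHandlerAddress)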
-
-
-void MacroAssembler::PopTryHandler() {
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- pop(r1);
- mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
- str(r1, MemOperand(ip));
-}
-
-
-void MacroAssembler::JumpToHandlerEntry() {
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- // r0 = exception, r1 = code object, r2 = state.
- ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
- add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
- ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
- add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
- add(pc, r1, Operand(r2, ASR, kSmiTagSize)); // Jump.
-}
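-
-// In C terms the dispatch above is roughly (a sketch):
-//
-//   int index = state >> StackHandler::kKindWidth;
-//   intptr_t offset = smi_value(handler_table[index]);  // untag the smi
-//   goto instruction_start + offset;  // instruction_start = code + header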
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in r0.
- if (!value.is(r0)) {
- mov(r0, value);
- }
- // Drop the stack pointer to the top of the top handler.
- mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(sp, MemOperand(r3));
- // Restore the next handler.
- pop(r2);
- str(r2, MemOperand(r3));
-
- // Get the code object (r1) and state (r2). Restore the context and frame
- // pointer.
- ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == JS_ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
- // or cp.
- tst(cp, cp);
- str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in r0.
- if (!value.is(r0)) {
- mov(r0, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(sp, MemOperand(r3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label fetch_next, check_kind;
- jmp(&check_kind);
- bind(&fetch_next);
- ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
- tst(r2, Operand(StackHandler::KindField::kMask));
- b(ne, &fetch_next);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- pop(r2);
- str(r2, MemOperand(r3));
- // Get the code object (r1) and state (r2). Clear the context and frame
- // pointer (0 was saved in the handler).
- ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!holder_reg.is(ip));
- ASSERT(!scratch.is(ip));
-
- // Load current lexical context from the stack frame.
- ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- cmp(scratch, Operand::Zero());
- Check(ne, "we should not have an empty lexical context");
-#endif
-
- // Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- ldr(scratch, FieldMemOperand(scratch, offset));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
- // Cannot use ip as a temporary in this verification code, because ip is
- // clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the native_context_map.
- ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::native_context should be a native context.");
- pop(holder_reg); // Restore holder.
- }
-
- // Check if both contexts are the same.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- cmp(scratch, Operand(ip));
- b(eq, &same_contexts);
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
- // Cannot use ip as a temporary in this verification code, because ip is
- // clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- mov(holder_reg, ip); // Move ip to its holding place.
- LoadRoot(ip, Heap::kNullValueRootIndex);
- cmp(holder_reg, ip);
- Check(ne, "JSGlobalProxy::context() should not be null.");
-
- ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- cmp(holder_reg, ip);
- Check(eq, "JSGlobalObject::native_context should be a native context.");
- // Restoring ip is not needed; it is reloaded below.
- pop(holder_reg); // Restore holder.
- // Restore ip to holder's context.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- ldr(scratch, FieldMemOperand(scratch, token_offset));
- ldr(ip, FieldMemOperand(ip, token_offset));
- cmp(scratch, Operand(ip));
- b(ne, miss);
-
- bind(&same_contexts);
-}
-
-
-void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
- // First of all we assign the hash seed to scratch.
- LoadRoot(scratch, Heap::kHashSeedRootIndex);
- SmiUntag(scratch);
-
- // Xor original key with a seed.
- eor(t0, t0, Operand(scratch));
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- mvn(scratch, Operand(t0));
- add(t0, scratch, Operand(t0, LSL, 15));
- // hash = hash ^ (hash >> 12);
- eor(t0, t0, Operand(t0, LSR, 12));
- // hash = hash + (hash << 2);
- add(t0, t0, Operand(t0, LSL, 2));
- // hash = hash ^ (hash >> 4);
- eor(t0, t0, Operand(t0, LSR, 4));
- // hash = hash * 2057;
- mov(scratch, Operand(t0, LSL, 11));
- add(t0, t0, Operand(t0, LSL, 3));
- add(t0, t0, scratch);
- // hash = hash ^ (hash >> 16);
- eor(t0, t0, Operand(t0, LSR, 16));
-}
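-
-// Equivalent C mixing sequence (a sketch; it must mirror ComputeIntegerHash
-// in utils.h, as noted above, and the function name here is illustrative):
-//
-//   uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
-//     uint32_t hash = key ^ seed;
-//     hash = ~hash + (hash << 15);
-//     hash = hash ^ (hash >> 12);
-//     hash = hash + (hash << 2);
-//     hash = hash ^ (hash >> 4);
-//     hash = hash * 2057;  // emitted above as hash + (hash << 3) + (hash << 11)
-//     hash = hash ^ (hash >> 16);
-//     return hash;
-//   }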
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register t0,
- Register t1,
- Register t2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'elements'.
- // Unchanged on bailout so 'key' and 'elements' can still be used
- // in further computation.
- //
- // Scratch registers:
- //
- // t0 - holds the untagged key on entry and holds the hash once computed.
- //
- // t1 - used to hold the capacity mask of the dictionary
- //
- // t2 - used for the index into the dictionary.
- Label done;
-
- GetNumberHash(t0, t1);
-
- // Compute the capacity mask.
- ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
- mov(t1, Operand(t1, ASR, kSmiTagSize)); // Convert smi to int.
- sub(t1, t1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use t2 for index calculations and keep the hash intact in t0.
- mov(t2, t0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(t2, t2, Operand(t1));
-
- // Scale the index by multiplying by the element size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
- add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
-
- // Check if the key is identical to the name.
- add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
- ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
- cmp(key, Operand(ip));
- if (i != kProbes - 1) {
- b(eq, &done);
- } else {
- b(ne, miss);
- }
- }
-
- bind(&done);
- // Check that the value is a normal property.
- // t2: elements + (index * kPointerSize)
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ldr(t1, FieldMemOperand(t2, kDetailsOffset));
- tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
- b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- ldr(result, FieldMemOperand(t2, kValueOffset));
-}
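-
-// Probe i of the unrolled loop above computes, in C terms (a sketch):
-//
-//   int index = (hash + SeededNumberDictionary::GetProbeOffset(i)) & mask;
-//   Entry* entry = &elements[index * 3];  // kEntrySize == 3 words
-//   if (entry->key == key) goto done;     // else try the next probe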
-
-
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Operand(0x7091));
- mov(scratch1, Operand(0x7191));
- mov(scratch2, Operand(0x7291));
- }
- jmp(gc_required);
- return;
- }
-
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!scratch1.is(ip));
- ASSERT(!scratch2.is(ip));
-
- // Convert the object size into bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- object_size *= kPointerSize;
- }
- ASSERT_EQ(0, object_size & kObjectAlignmentMask);
-
- // Check relative positions of allocation top and limit addresses.
- // The values must be adjacent in memory to allow the use of LDM.
- // Also, assert that the registers are numbered such that the values
- // are loaded in the correct order.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
- ASSERT(result.code() < ip.code());
-
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- Register obj_size_reg = scratch2;
- mov(topaddr, Operand(new_space_allocation_top));
- Operand obj_size_operand = Operand(object_size);
- if (!obj_size_operand.is_single_instruction(this)) {
- // We are about to steal ip, so we need to load this value first.
- mov(obj_size_reg, obj_size_operand);
- }
-
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- ldm(ia, topaddr, result.bit() | ip.bit());
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below, so this use of ip does not cause a difference in
- // register content between debug and release modes.
- ldr(ip, MemOperand(topaddr));
- cmp(result, ip);
- Check(eq, "Unexpected allocation top");
- }
- // Load allocation limit into ip. Result already contains allocation top.
- ldr(ip, MemOperand(topaddr, limit - top));
- }
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
- Label aligned;
- b(eq, &aligned);
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- if (obj_size_operand.is_single_instruction(this)) {
- // We can add the size as an immediate.
- add(scratch2, result, obj_size_operand, SetCC);
- } else {
- // The size doesn't fit in an immediate, so we have to use the register.
- add(scratch2, result, obj_size_reg, SetCC);
- }
- b(cs, gc_required);
- cmp(scratch2, Operand(ip));
- b(hi, gc_required);
- str(scratch2, MemOperand(topaddr));
-
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- add(result, result, Operand(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Operand(0x7091));
- mov(scratch1, Operand(0x7191));
- mov(scratch2, Operand(0x7291));
- }
- jmp(gc_required);
- return;
- }
-
- // Assert that the register arguments are different and that none of
- // them are ip. ip is used explicitly in the code generated below.
- ASSERT(!result.is(scratch1));
- ASSERT(!result.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- ASSERT(!object_size.is(ip));
- ASSERT(!result.is(ip));
- ASSERT(!scratch1.is(ip));
- ASSERT(!scratch2.is(ip));
-
- // Check relative positions of allocation top and limit addresses.
- // The values must be adjacent in memory to allow the use of LDM.
- // Also, assert that the registers are numbered such that the values
- // are loaded in the correct order.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
- ASSERT(result.code() < ip.code());
-
- // Set up allocation top address.
- Register topaddr = scratch1;
- mov(topaddr, Operand(new_space_allocation_top));
-
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- ldm(ia, topaddr, result.bit() | ip.bit());
- } else {
- if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below, so this use of ip does not cause a difference in
- // register content between debug and release modes.
- ldr(ip, MemOperand(topaddr));
- cmp(result, ip);
- Check(eq, "Unexpected allocation top");
- }
- // Load allocation limit into ip. Result already contains allocation top.
- ldr(ip, MemOperand(topaddr, limit - top));
- }
-
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
- Label aligned;
- b(eq, &aligned);
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top. Object size may be in words so a shift is
- // required to get the number of bytes.
- if ((flags & SIZE_IN_WORDS) != 0) {
- add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
- } else {
- add(scratch2, result, Operand(object_size), SetCC);
- }
- b(cs, gc_required);
- cmp(scratch2, Operand(ip));
- b(hi, gc_required);
-
- // Update allocation top. result temporarily holds the new top.
- if (emit_debug_code()) {
- tst(scratch2, Operand(kObjectAlignmentMask));
- Check(eq, "Unaligned allocation in new space");
- }
- str(scratch2, MemOperand(topaddr));
-
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- add(result, result, Operand(kHeapObjectTag));
- }
-}
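-
-// Both AllocateInNewSpace variants implement the same bump-pointer fast
-// path; in C terms, roughly (a sketch):
-//
-//   result = *top_address;
-//   new_top = result + size_in_bytes;
-//   if (overflow || new_top > *limit_address) goto gc_required;
-//   *top_address = new_top;
-//   if (flags & TAG_OBJECT) result += kHeapObjectTag;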
-
-
-void MacroAssembler::UndoAllocationInNewSpace(Register object,
- Register scratch) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(object, object, Operand(~kHeapObjectTagMask));
-#ifdef DEBUG
- // Check that the object un-allocated is below the current top.
- mov(scratch, Operand(new_space_allocation_top));
- ldr(scratch, MemOperand(scratch));
- cmp(object, scratch);
- Check(lt, "Undo allocation of non allocated memory");
-#endif
- // Write the address of the object to un-allocate as the current top.
- mov(scratch, Operand(new_space_allocation_top));
- str(object, MemOperand(scratch));
-}
-
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
- add(scratch1, scratch1,
- Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
- and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate two-byte string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- ASSERT(kCharSize == 1);
- add(scratch1, length,
- Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
- and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate ASCII string in new space.
- AllocateInNewSpace(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kConsStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::CompareObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type) {
- ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(map, type_reg, type);
-}
-
-
-void MacroAssembler::CompareInstanceType(Register map,
- Register type_reg,
- InstanceType type) {
- ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- cmp(type_reg, Operand(type));
-}
-
-
-void MacroAssembler::CompareRoot(Register obj,
- Heap::RootListIndex index) {
- ASSERT(!obj.is(ip));
- LoadRoot(ip, index);
- cmp(obj, ip);
-}
-
-
-void MacroAssembler::CheckFastElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
- b(hi, fail);
-}
-
-
-void MacroAssembler::CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- b(ls, fail);
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
- b(hi, fail);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- b(hi, fail);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail,
- int elements_offset) {
- Label smi_value, maybe_nan, have_double_value, is_nan, done;
- Register mantissa_reg = scratch2;
- Register exponent_reg = scratch3;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number.
- CheckMap(value_reg,
- scratch1,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- // Check for NaN: all NaN values have a value greater (signed) than
- // 0x7ff00000 in the exponent word.
- mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
- ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- cmp(exponent_reg, scratch1);
- b(ge, &maybe_nan);
-
- ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- bind(&have_double_value);
- add(scratch1, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- str(mantissa_reg, FieldMemOperand(
- scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
- uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
- sizeof(kHoleNanLower32);
- str(exponent_reg, FieldMemOperand(scratch1, offset));
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If the fraction is not zero, it's NaN;
- // otherwise it's Infinity, and the non-NaN code path applies.
- b(gt, &is_nan);
- ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- cmp(mantissa_reg, Operand::Zero());
- b(eq, &have_double_value);
- bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- jmp(&have_double_value);
-
- bind(&smi_value);
- add(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
- elements_offset));
- add(scratch1, scratch1,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- // scratch1 is now the effective address of the double element.
-
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP2)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = elements_reg;
- SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- d0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- s2);
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP2);
- vstr(d0, scratch1, 0);
- } else {
- str(mantissa_reg, MemOperand(scratch1, 0));
- str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
- bind(&done);
-}
-
-
-void MacroAssembler::CompareMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- CompareMap(scratch, map, early_success, mode);
-}
-
-
-void MacroAssembler::CompareMap(Register obj_map,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
- cmp(obj_map, Operand(map));
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- b(eq, early_success);
- cmp(obj_map, Operand(Handle<Map>(current_map)));
- }
- }
- }
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- Label success;
- CompareMap(obj, scratch, map, &success, mode);
- b(ne, fail);
- bind(&success);
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- LoadRoot(ip, index);
- cmp(scratch, ip);
- b(ne, fail);
-}
-
-
-void MacroAssembler::DispatchMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type) {
- Label fail;
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, &fail);
- }
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- mov(ip, Operand(map));
- cmp(scratch, ip);
- Jump(success, RelocInfo::CODE_TARGET, eq);
- bind(&fail);
-}
-
-
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
- b(ne, miss);
-
- if (miss_on_bound_function) {
- ldr(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- ldr(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- tst(scratch,
- Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
- b(ne, miss);
- }
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
- b(ne, &non_instance);
-
- // Get the prototype or initial map from the function.
- ldr(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it; simply
- // miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- cmp(result, ip);
- b(eq, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CompareObjectType(result, scratch, scratch, MAP_TYPE);
- b(ne, &done);
-
- // Get the prototype from the initial map.
- ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
-
- // All done.
- bind(&done);
-}
-
-
-void MacroAssembler::CallStub(CodeStub* stub,
- TypeFeedbackId ast_id,
- Condition cond) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
-}
-
-
-static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
- return ref0.address() - ref1.address();
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- int stack_space) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(isolate()),
- next_address);
- const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(isolate()),
- next_address);
-
- // Allocate HandleScope in callee-save registers.
- mov(r7, Operand(next_address));
- ldr(r4, MemOperand(r7, kNextOffset));
- ldr(r5, MemOperand(r7, kLimitOffset));
- ldr(r6, MemOperand(r7, kLevelOffset));
- add(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, r0);
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
- // Native call returns to the DirectCEntry stub which redirects to the
- // return address pushed on the stack (which could have moved after GC).
- // DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub;
- stub.GenerateCall(this, function);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, r0);
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
-
- // If the result is non-zero, dereference it to get the result value;
- // otherwise set it to undefined.
- cmp(r0, Operand::Zero());
- LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- ldr(r0, MemOperand(r0), ne);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- str(r4, MemOperand(r7, kNextOffset));
- if (emit_debug_code()) {
- ldr(r1, MemOperand(r7, kLevelOffset));
- cmp(r1, r6);
- Check(eq, "Unexpected level after return from api call");
- }
- sub(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
- ldr(ip, MemOperand(r7, kLimitOffset));
- cmp(r5, ip);
- b(ne, &delete_allocated_handles);
-
- // Check if the function scheduled an exception.
- bind(&leave_exit_frame);
- LoadRoot(r4, Heap::kTheHoleValueRootIndex);
- mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
- ldr(r5, MemOperand(ip));
- cmp(r4, r5);
- b(ne, &promote_scheduled_exception);
-
- // LeaveExitFrame expects unwind space to be in a register.
- mov(r4, Operand(stack_space));
- LeaveExitFrame(false, r4);
- mov(pc, lr);
-
- bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- str(r5, MemOperand(r7, kLimitOffset));
- mov(r4, r0);
- PrepareCallCFunction(1, r5);
- mov(r0, Operand(ExternalReference::isolate_address()));
- CallCFunction(
- ExternalReference::delete_handle_scope_extensions(isolate()), 1);
- mov(r0, r4);
- jmp(&leave_exit_frame);
-}
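-
-// The HandleScope bookkeeping above is, in C terms, roughly (a sketch with
-// illustrative names):
-//
-//   prev_next = scope->next; prev_limit = scope->limit; scope->level++;
-//   result = CallApiFunction();
-//   scope->next = prev_next; scope->level--;
-//   if (scope->limit != prev_limit) DeleteAllocatedHandles();
-//   if (scheduled_exception != the_hole) PromoteScheduledException();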
-
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- add(sp, sp, Operand(num_arguments * kPointerSize));
- }
- LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // If the hash field contains an array index, pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it do not
- // conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in the index register. kArrayIndexValueMask
- // has zeros in the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
- Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- mov(index, Operand(hash, LSL, kSmiTagSize));
-}
-
-
-void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
- Register outHighReg,
- Register outLowReg) {
- // ARMv7 VFP3 instructions to implement integer to double conversion.
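- // Note: clobbers r7, s15 and d7, which are used as fixed scratch registers.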
- mov(r7, Operand(inReg, ASR, kSmiTagSize));
- vmov(s15, r7);
- vcvt_f64_s32(d7, s15);
- vmov(outLowReg, outHighReg, d7);
-}
-
-
-void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
- DwVfpRegister result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- SwVfpRegister scratch3,
- Label* not_number,
- ObjectToDoubleFlags flags) {
- Label done;
- if ((flags & OBJECT_NOT_SMI) == 0) {
- Label not_smi;
- JumpIfNotSmi(object, &not_smi);
- // Remove smi tag and convert to double.
- mov(scratch1, Operand(object, ASR, kSmiTagSize));
- vmov(scratch3, scratch1);
- vcvt_f64_s32(result, scratch3);
- b(&done);
- bind(&not_smi);
- }
- // Check for heap number and load double value from it.
- ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
- sub(scratch2, object, Operand(kHeapObjectTag));
- cmp(scratch1, heap_number_map);
- b(ne, not_number);
- if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
- // If exponent is all ones the number is either a NaN or +/-Infinity.
- ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- Sbfx(scratch1,
- scratch1,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // An all-one value sign-extends to -1.
- cmp(scratch1, Operand(-1));
- b(eq, not_number);
- }
- vldr(result, scratch2, HeapNumber::kValueOffset);
- bind(&done);
-}
-
-
-void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
- DwVfpRegister value,
- Register scratch1,
- SwVfpRegister scratch2) {
- mov(scratch1, Operand(smi, ASR, kSmiTagSize));
- vmov(scratch2, scratch1);
- vcvt_f64_s32(value, scratch2);
-}
-
-
-// Tries to get a signed int32 out of a double-precision floating-point heap
-// number. Rounds towards 0. Branches to 'not_int32' if the double is outside
-// the 32-bit signed integer range.
-void MacroAssembler::ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- DwVfpRegister double_scratch,
- Label *not_int32) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- sub(scratch, source, Operand(kHeapObjectTag));
- vldr(double_scratch, scratch, HeapNumber::kValueOffset);
- vcvt_s32_f64(double_scratch.low(), double_scratch);
- vmov(dest, double_scratch.low());
- // The signed vcvt instruction saturates to the minimum (0x80000000) or
- // maximum (0x7fffffff) signed 32-bit integer when the double is out of
- // range. After subtracting one, the minimum signed integer becomes the
- // maximum signed integer.
- sub(scratch, dest, Operand(1));
- cmp(scratch, Operand(LONG_MAX - 1));
- // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
- b(ge, not_int32);
- } else {
- // This code is faster for doubles that are in the ranges -0x7fffffff to
- // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost exactly
- // to the range of signed int32 values that are not Smis. Jumps to the label
- // 'not_int32' if the double isn't in the range -0x80000000.0 to
- // 0x80000000.0 (excluding the endpoints).
- Label right_exponent, done;
- // Get exponent word.
- ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- Ubfx(scratch2,
- scratch,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
- // Load dest with zero. We use this either for the final shift or
- // for the answer.
- mov(dest, Operand::Zero());
- // Check whether the exponent matches a 32-bit signed int that is not a Smi.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
- // the exponent that we are fastest at and also the highest exponent we can
- // handle here.
- const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
- // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
- // split it up to avoid a constant pool entry. You can't do that in general
- // for cmp because of the overflow flag, but we know the exponent is in the
- // range 0-2047 so there is no overflow.
- int fudge_factor = 0x400;
- sub(scratch2, scratch2, Operand(fudge_factor));
- cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- b(eq, &right_exponent);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- b(gt, not_int32);
-
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
- sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
- // Dest already has a Smi zero.
- b(lt, &done);
-
- // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
- // get how much to shift down.
- rsb(dest, scratch2, Operand(30));
-
- bind(&right_exponent);
- // Get the top bits of the mantissa.
- and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just OR'ed in the implicit bit, which takes care of one bit;
- // we also want to leave the sign bit 0, so we subtract 2 bits from the
- // shift distance.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- mov(scratch2, Operand(scratch2, LSL, shift_distance));
- // Put sign in zero flag.
- tst(scratch, Operand(HeapNumber::kSignMask));
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Shift the low mantissa word down by 22 bits to keep only its top 10 bits.
- orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
- // Move down according to the exponent.
- mov(dest, Operand(scratch, LSR, dest));
- // Fix sign if sign bit was set.
- rsb(dest, dest, Operand::Zero(), LeaveCC, ne);
- bind(&done);
- }
-}
-
-
-void MacroAssembler::TryFastDoubleToInt32(Register result,
- DwVfpRegister double_input,
- DwVfpRegister double_scratch,
- Label* done) {
- ASSERT(!double_input.is(double_scratch));
-
- vcvt_s32_f64(double_scratch.low(), double_input);
- vmov(result, double_scratch.low());
- vcvt_f64_s32(double_scratch, double_scratch.low());
- VFPCompareAndSetFlags(double_input, double_scratch);
- b(eq, done);
-}
-
-
-void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
- Register result,
- DwVfpRegister double_input,
- Register scratch,
- DwVfpRegister double_scratch,
- CheckForInexactConversion check_inexact) {
- ASSERT(!result.is(scratch));
- ASSERT(!double_input.is(double_scratch));
-
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatures::Scope scope(VFP2);
- Register prev_fpscr = result;
- Label done;
-
- // Test for values that can be exactly represented as a signed 32-bit integer.
- TryFastDoubleToInt32(result, double_input, double_scratch, &done);
-
- // Convert to integer, respecting rounding mode.
- int32_t check_inexact_conversion =
- (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
-
- // Set custom FPSCR:
- // - Set rounding mode.
- // - Clear vfp cumulative exception flags.
- // - Make sure Flush-to-zero mode control bit is unset.
- vmrs(prev_fpscr);
- bic(scratch,
- prev_fpscr,
- Operand(kVFPExceptionMask |
- check_inexact_conversion |
- kVFPRoundingModeMask |
- kVFPFlushToZeroMask));
- // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
- if (rounding_mode != kRoundToNearest) {
- orr(scratch, scratch, Operand(rounding_mode));
- }
- vmsr(scratch);
-
- // Convert the argument to an integer.
- vcvt_s32_f64(double_scratch.low(),
- double_input,
- (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
- : kFPSCRRounding);
-
- // Retrieve FPSCR.
- vmrs(scratch);
- // Restore FPSCR.
- vmsr(prev_fpscr);
- // Move the converted value into the result register.
- vmov(result, double_scratch.low());
- // Check for vfp exceptions.
- tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
-
- bind(&done);
-}
-
-
-void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch) {
- Label done, normal_exponent, restore_sign;
-
- // Extract the biased exponent in result.
- Ubfx(result,
- input_high,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Check for Infinity and NaNs, which should return 0.
- cmp(result, Operand(HeapNumber::kExponentMask));
- mov(result, Operand::Zero(), LeaveCC, eq);
- b(eq, &done);
-
- // Express exponent as delta to (number of mantissa bits + 31).
- sub(result,
- result,
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
- SetCC);
-
- // If the delta is strictly positive, all bits would be shifted away,
- // which means that we can return 0.
- b(le, &normal_exponent);
- mov(result, Operand::Zero());
- b(&done);
-
- bind(&normal_exponent);
- const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
- // Calculate shift.
- add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
-
- // Save the sign.
- Register sign = result;
- result = no_reg;
- and_(sign, input_high, Operand(HeapNumber::kSignMask));
-
- // Set the implicit 1 before the mantissa part in input_high.
- orr(input_high,
- input_high,
- Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- // Shift the mantissa bits to the correct position.
- // We don't need to clear non-mantissa bits as they will be shifted away.
- // If they weren't, it would mean that the answer is in the 32bit range.
- mov(input_high, Operand(input_high, LSL, scratch));
-
- // Replace the shifted bits with bits from the lower mantissa word.
- Label pos_shift, shift_done;
- rsb(scratch, scratch, Operand(32), SetCC);
- b(&pos_shift, ge);
-
- // Negate scratch.
- rsb(scratch, scratch, Operand::Zero());
- mov(input_low, Operand(input_low, LSL, scratch));
- b(&shift_done);
-
- bind(&pos_shift);
- mov(input_low, Operand(input_low, LSR, scratch));
-
- bind(&shift_done);
- orr(input_high, input_high, Operand(input_low));
- // Restore sign if necessary.
- cmp(sign, Operand::Zero());
- result = sign;
- sign = no_reg;
- rsb(result, input_high, Operand::Zero(), LeaveCC, ne);
- mov(result, input_high, LeaveCC, eq);
- bind(&done);
-}
-
-
-void MacroAssembler::EmitECMATruncate(Register result,
- DwVfpRegister double_input,
- DwVfpRegister double_scratch,
- Register scratch,
- Register input_high,
- Register input_low) {
- CpuFeatures::Scope scope(VFP2);
- ASSERT(!input_high.is(result));
- ASSERT(!input_low.is(result));
- ASSERT(!input_low.is(input_high));
- ASSERT(!scratch.is(result) &&
- !scratch.is(input_high) &&
- !scratch.is(input_low));
- ASSERT(!double_input.is(double_scratch));
-
- Label done;
-
- // Test if the value can be exactly represented as a signed integer.
- vcvt_s32_f64(double_scratch.low(), double_input);
- vmov(result, double_scratch.low());
- vcvt_f64_s32(double_scratch, double_scratch.low());
- // Note: this comparison is cheaper than reading the FPSCR exception bits.
- VFPCompareAndSetFlags(double_input, double_scratch);
- b(eq, &done);
-
- // Check the exception flags. If they are not set, we are done.
- // If they are set, it could be because of the conversion above, or because
- // they were set before this code.
- vmrs(scratch);
- tst(scratch, Operand(kVFPOverflowExceptionBit |
- kVFPUnderflowExceptionBit |
- kVFPInvalidOpExceptionBit));
- b(eq, &done);
-
- // Clear cumulative exception flags.
- bic(scratch, scratch, Operand(kVFPExceptionMask));
- vmsr(scratch);
- // Try a conversion to a signed integer.
- vcvt_s32_f64(double_scratch.low(), double_input);
- // Retrieve the FPSCR.
- vmrs(scratch);
- // Check for overflow and NaNs.
- tst(scratch, Operand(kVFPOverflowExceptionBit |
- kVFPUnderflowExceptionBit |
- kVFPInvalidOpExceptionBit));
- // If we had no exceptions we are done.
- b(eq, &done);
-
- // Load the double value and perform a manual truncation.
- vmov(input_low, input_high, double_input);
- EmitOutOfInt32RangeTruncate(result,
- input_high,
- input_low,
- scratch);
- bind(&done);
-}
-
-
-void MacroAssembler::GetLeastBitsFromSmi(Register dst,
- Register src,
- int num_least_bits) {
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
- ubfx(dst, src, kSmiTagSize, num_least_bits);
- } else {
- mov(dst, Operand(src, ASR, kSmiTagSize));
- and_(dst, dst, Operand((1 << num_least_bits) - 1));
- }
-}
-
-
-void MacroAssembler::GetLeastBitsFromInt32(Register dst,
- Register src,
- int num_least_bits) {
- and_(dst, src, Operand((1 << num_least_bits) - 1));
-}
-
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
- // All parameters are on the stack. r0 has the return value after call.
-
- // If the expected number of arguments of the runtime function is
- // constant, we check that the actual number of arguments match the
- // expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r0, Operand(num_arguments));
- mov(r1, Operand(ExternalReference(f, isolate())));
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- mov(r0, Operand(function->nargs));
- mov(r1, Operand(ExternalReference(function, isolate())));
- SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
- ? kSaveFPRegs
- : kDontSaveFPRegs;
- CEntryStub stub(1, mode);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments) {
- mov(r0, Operand(num_arguments));
- mov(r1, Operand(ext));
-
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r0, Operand(num_arguments));
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
-#if defined(__thumb__)
- // Thumb mode builtin.
- ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
-#endif
- mov(r1, Operand(builtin));
- CEntryStub stub(1);
- Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- GetBuiltinEntry(r2, id);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(r2));
- SetCallKind(r5, CALL_AS_METHOD);
- Call(r2);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(r5, CALL_AS_METHOD);
- Jump(r2);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
- // Load the builtins object into target register.
- ldr(target,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
- // Load the JavaScript builtin function from the builtins object.
- ldr(target, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(r1));
- GetBuiltinFunction(r1, id);
- // Load the code entry point from the builtins object.
- ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(scratch1, Operand(value));
- mov(scratch2, Operand(ExternalReference(counter)));
- str(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(scratch2, Operand(ExternalReference(counter)));
- ldr(scratch1, MemOperand(scratch2));
- add(scratch1, scratch1, Operand(value));
- str(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(scratch2, Operand(ExternalReference(counter)));
- ldr(scratch1, MemOperand(scratch2));
- sub(scratch1, scratch1, Operand(value));
- str(scratch1, MemOperand(scratch2));
- }
-}
-
-
-void MacroAssembler::Assert(Condition cond, const char* msg) {
- if (emit_debug_code())
- Check(cond, msg);
-}
-
-
-void MacroAssembler::AssertRegisterIsRoot(Register reg,
- Heap::RootListIndex index) {
- if (emit_debug_code()) {
- LoadRoot(ip, index);
- cmp(reg, ip);
- Check(eq, "Register did not match expected root");
- }
-}
-
-
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- ASSERT(!elements.is(ip));
- Label ok;
- push(elements);
- ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- cmp(elements, ip);
- b(eq, &ok);
- LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
- cmp(elements, ip);
- b(eq, &ok);
- LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
- cmp(elements, ip);
- b(eq, &ok);
- Abort("JSObject with fast elements map has slow elements");
- bind(&ok);
- pop(elements);
- }
-}
-
-
-void MacroAssembler::Check(Condition cond, const char* msg) {
- Label L;
- b(cond, &L);
- Abort(msg);
- // will not return here
- bind(&L);
-}
-
-
-void MacroAssembler::Abort(const char* msg) {
- Label abort_start;
- bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems; however, msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
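- // With kSmiTag == 0 and kSmiTagMask == 1, p0 is the pointer with its low
- // bit cleared (a valid smi); p1 - p0 is the 0 or 1 byte alignment slack.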
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
-#ifdef DEBUG
- if (msg != NULL) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-#endif
-
- mov(r0, Operand(p0));
- push(r0);
- mov(r0, Operand(Smi::FromInt(p1 - p0)));
- push(r0);
- // Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
- } else {
- CallRuntime(Runtime::kAbort, 2);
- }
- // will not return here
- if (is_const_pool_blocked()) {
- // If the calling code cares about the exact number of
- // instructions generated, we insert padding here to keep the size
- // of the Abort macro constant.
- static const int kExpectedAbortInstructions = 10;
- int abort_instructions = InstructionsGeneratedSince(&abort_start);
- ASSERT(abort_instructions <= kExpectedAbortInstructions);
- while (abort_instructions++ < kExpectedAbortInstructions) {
- nop();
- }
- }
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- for (int i = 1; i < context_chain_length; i++) {
- ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- }
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context register cp).
- mov(dst, cp);
- }
-}
-
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- // Load the global or builtins object from the current context.
- ldr(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check that the function's map is the same as the expected cached map.
- ldr(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ldr(ip, FieldMemOperand(scratch, offset));
- cmp(map_in_out, ip);
- b(ne, no_map_match);
-
- // Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ldr(map_in_out, FieldMemOperand(scratch, offset));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- ldr(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- ldr(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- ldr(function, FieldMemOperand(function,
- GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- ldr(function, MemOperand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadArrayFunction(Register function) {
- // Load the global or builtins object from the current context.
- ldr(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the global context from the global or builtins object.
- ldr(function,
- FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the array function from the native context.
- ldr(function,
- MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch) {
- // Load the initial map. The global functions all have initial maps.
- ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
- b(&ok);
- bind(&fail);
- Abort("Global functions must have initial map");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
- Register reg,
- Register scratch,
- Label* not_power_of_two_or_zero) {
- sub(scratch, reg, Operand(1), SetCC);
- b(mi, not_power_of_two_or_zero);
- tst(scratch, reg);
- b(ne, not_power_of_two_or_zero);
-}
-
-
-void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
- Register reg,
- Register scratch,
- Label* zero_and_neg,
- Label* not_power_of_two) {
- sub(scratch, reg, Operand(1), SetCC);
- b(mi, zero_and_neg);
- tst(scratch, reg);
- b(ne, not_power_of_two);
-}
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register reg1,
- Register reg2,
- Label* on_not_both_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(reg1, Operand(kSmiTagMask));
- tst(reg2, Operand(kSmiTagMask), eq);
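- // The second tst only executes if the first left eq (reg1 is a smi), so
- // ne afterwards means at least one of the two registers is not a smi.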
- b(ne, on_not_both_smi);
-}
-
-
-void MacroAssembler::UntagAndJumpIfSmi(
- Register dst, Register src, Label* smi_case) {
- STATIC_ASSERT(kSmiTag == 0);
- mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
- b(cc, smi_case); // Shifter carry is not set for a smi.
-}
-
-
-void MacroAssembler::UntagAndJumpIfNotSmi(
- Register dst, Register src, Label* non_smi_case) {
- STATIC_ASSERT(kSmiTag == 0);
- mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
- b(cs, non_smi_case); // Shifter carry is set for a non-smi.
-}
-
-
-void MacroAssembler::JumpIfEitherSmi(Register reg1,
- Register reg2,
- Label* on_either_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(reg1, Operand(kSmiTagMask));
- tst(reg2, Operand(kSmiTagMask), ne);
- b(eq, on_either_smi);
-}
-
-
-void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi");
- }
-}
-
-
-void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Check(eq, "Operand is not smi");
- }
-}
-
-
-void MacroAssembler::AssertString(Register object) {
- if (emit_debug_code()) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(object, Operand(kSmiTagMask));
- Check(ne, "Operand is a smi and not a string");
- push(object);
- ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
- CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
- pop(object);
- Check(lo, "Operand is not a string");
- }
-}
-
-
-
-void MacroAssembler::AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- if (emit_debug_code()) {
- CompareRoot(src, root_value_index);
- Check(eq, message);
- }
-}
-
-
-void MacroAssembler::JumpIfNotHeapNumber(Register object,
- Register heap_number_map,
- Register scratch,
- Label* on_not_heap_number) {
- ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- cmp(scratch, heap_number_map);
- b(ne, on_not_heap_number);
-}
-
-
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Test that both first and second are sequential ASCII strings.
- // Assume that they are non-smis.
- ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
-}
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that neither is a smi.
- STATIC_ASSERT(kSmiTag == 0);
- and_(scratch1, first, Operand(second));
- JumpIfSmi(scratch1, failure);
- JumpIfNonSmisNotBothSequentialAsciiStrings(first,
- second,
- scratch1,
- scratch2,
- failure);
-}
-
-
-// Allocates a heap number or jumps to the gc_required label if the young
-// space is full and a scavenge is needed.
-void MacroAssembler::AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required,
- TaggingMode tagging_mode) {
- // Allocate an object in the heap for the heap number and tag it as a heap
- // object.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- tagging_mode == TAG_RESULT ? TAG_OBJECT :
- NO_ALLOCATION_FLAGS);
-
- // Store heap number map in the allocated object.
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- if (tagging_mode == TAG_RESULT) {
- str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
-}
-
-
-void MacroAssembler::AllocateHeapNumberWithValue(Register result,
- DwVfpRegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required) {
- AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
- sub(scratch1, result, Operand(kHeapObjectTag));
- vstr(value, scratch1, HeapNumber::kValueOffset);
-}
-
-
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- RegList temps,
- int field_count) {
- // At least one bit set in the first 15 registers.
- ASSERT((temps & ((1 << 15) - 1)) != 0);
- ASSERT((temps & dst.bit()) == 0);
- ASSERT((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
-
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < 15; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.set_code(i);
- break;
- }
- }
- ASSERT(!tmp.is(no_reg));
-
- for (int i = 0; i < field_count; i++) {
- ldr(tmp, FieldMemOperand(src, i * kPointerSize));
- str(tmp, FieldMemOperand(dst, i * kPointerSize));
- }
-}
-
-
-void MacroAssembler::CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
-
- // Align src before copying in word size chunks.
- bind(&align_loop);
- cmp(length, Operand::Zero());
- b(eq, &done);
- bind(&align_loop_1);
- tst(src, Operand(kPointerSize - 1));
- b(eq, &word_loop);
- ldrb(scratch, MemOperand(src, 1, PostIndex));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
-
- // Copy bytes in word size chunks.
- bind(&word_loop);
- if (emit_debug_code()) {
- tst(src, Operand(kPointerSize - 1));
- Assert(eq, "Expecting alignment for CopyBytes");
- }
- cmp(length, Operand(kPointerSize));
- b(lt, &byte_loop);
- ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
- if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
- str(scratch, MemOperand(dst, kPointerSize, PostIndex));
- } else {
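- // No unaligned store support: emit the word one byte at a time, least
- // significant byte first (little-endian order).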
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- }
- sub(length, length, Operand(kPointerSize));
- b(&word_loop);
-
- // Copy the last bytes if any left.
- bind(&byte_loop);
- cmp(length, Operand::Zero());
- b(eq, &done);
- bind(&byte_loop_1);
- ldrb(scratch, MemOperand(src, 1, PostIndex));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
- bind(&done);
-}
-
-
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler) {
- Label loop, entry;
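- // Post-indexed stores walk start_offset up to end_offset, writing one
- // pointer-sized filler value per iteration.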
- b(&entry);
- bind(&loop);
- str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
- bind(&entry);
- cmp(start_offset, end_offset);
- b(lt, &loop);
-}
-
-
-void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
- Register source, // Input.
- Register scratch) {
- ASSERT(!zeros.is(source) || !source.is(scratch));
- ASSERT(!zeros.is(scratch));
- ASSERT(!scratch.is(ip));
- ASSERT(!source.is(ip));
- ASSERT(!zeros.is(ip));
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
- clz(zeros, source); // This instruction is only supported from ARMv5 onwards.
-#else
- // Order of the next two lines is important: zeros register
- // can be the same as source register.
- Move(scratch, source);
- mov(zeros, Operand::Zero());
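- // Binary search for the leading one: test the top half of the remaining
- // bits; if it is clear, add the half-width to the count and shift up.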
- // Top 16.
- tst(scratch, Operand(0xffff0000));
- add(zeros, zeros, Operand(16), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
- // Top 8.
- tst(scratch, Operand(0xff000000));
- add(zeros, zeros, Operand(8), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
- // Top 4.
- tst(scratch, Operand(0xf0000000));
- add(zeros, zeros, Operand(4), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
- // Top 2.
- tst(scratch, Operand(0xc0000000));
- add(zeros, zeros, Operand(2), LeaveCC, eq);
- mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
- // Top bit.
- tst(scratch, Operand(0x80000000u));
- add(zeros, zeros, Operand(1), LeaveCC, eq);
-#endif
-}
-
-
-void MacroAssembler::CheckFor32DRegs(Register scratch) {
- mov(scratch, Operand(ExternalReference::cpu_features()));
- ldr(scratch, MemOperand(scratch));
- tst(scratch, Operand(1u << VFP32DREGS));
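- // eq here means only 16 D-registers are available; ne means all 32 are.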
-}
-
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- and_(scratch1, first, Operand(kFlatAsciiStringMask));
- and_(scratch2, second, Operand(kFlatAsciiStringMask));
- cmp(scratch1, Operand(kFlatAsciiStringTag));
- // Ignore second test if first test failed.
- cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
- b(ne, failure);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- and_(scratch, type, Operand(kFlatAsciiStringMask));
- cmp(scratch, Operand(kFlatAsciiStringTag));
- b(ne, failure);
-}
-
-static const int kRegisterPassedArguments = 4;
-
-
-int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments) {
- int stack_passed_words = 0;
- if (use_eabi_hardfloat()) {
- // In the hard floating point calling convention, we can use
- // all double registers to pass doubles.
- if (num_double_arguments > DoubleRegister::NumRegisters()) {
- stack_passed_words +=
- 2 * (num_double_arguments - DoubleRegister::NumRegisters());
- }
- } else {
- // In the soft floating point calling convention, every double
- // argument is passed using two registers.
- num_reg_arguments += 2 * num_double_arguments;
- }
- // Up to four simple arguments are passed in registers r0..r3.
- if (num_reg_arguments > kRegisterPassedArguments) {
- stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
- }
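- // Example for the soft-float ABI: 2 core arguments plus 3 doubles need
- // 2 + 2 * 3 = 8 register-sized slots; r0-r3 cover 4, so 4 words are
- // passed on the stack.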
- return stack_passed_words;
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
- int num_double_arguments,
- Register scratch) {
- int frame_alignment = ActivationFrameAlignment();
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
- if (frame_alignment > kPointerSize) {
- // Make the stack end at the alignment boundary and make room for the
- // stack-passed arguments plus one word for the original value of sp.
- mov(scratch, sp);
- sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frame_alignment));
- and_(sp, sp, Operand(-frame_alignment));
- str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
- } else {
- sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
- }
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
- Register scratch) {
- PrepareCallCFunction(num_reg_arguments, 0, scratch);
-}
-
-
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- if (use_eabi_hardfloat()) {
- Move(d0, dreg);
- } else {
- vmov(r0, r1, dreg);
- }
-}
-
-
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
- DwVfpRegister dreg2) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- if (use_eabi_hardfloat()) {
- if (dreg2.is(d0)) {
- ASSERT(!dreg1.is(d1));
- Move(d1, dreg2);
- Move(d0, dreg1);
- } else {
- Move(d0, dreg1);
- Move(d1, dreg2);
- }
- } else {
- vmov(r0, r1, dreg1);
- vmov(r2, r3, dreg2);
- }
-}
-
-
-void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
- Register reg) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- if (use_eabi_hardfloat()) {
- Move(d0, dreg);
- Move(r0, reg);
- } else {
- Move(r2, reg);
- vmov(r0, r1, dreg);
- }
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments) {
- mov(ip, Operand(function));
- CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
-}
-
-
-void MacroAssembler::CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments) {
- ASSERT(has_frame());
- // Make sure that the stack is aligned before calling a C function unless
- // running in the simulator. The simulator has its own alignment check which
- // provides more information.
-#if defined(V8_HOST_ARCH_ARM)
- if (emit_debug_code()) {
- int frame_alignment = OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
- Label alignment_as_expected;
- tst(sp, Operand(frame_alignment_mask));
- b(eq, &alignment_as_expected);
- // Don't use Check here, as it will call Runtime_Abort possibly
- // re-entering here.
- stop("Unexpected alignment");
- bind(&alignment_as_expected);
- }
- }
-#endif
-
- // Just call directly. The function called cannot cause a GC, or
- // allow preemption, so the return address in the link register
- // stays correct.
- Call(function);
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
- if (ActivationFrameAlignment() > kPointerSize) {
- ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
- } else {
- add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
- }
-}
-
-
-void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result) {
- const uint32_t kLdrOffsetMask = (1 << 12) - 1;
- const int32_t kPCRegOffset = 2 * kPointerSize;
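- // On ARM, reading pc yields the instruction address plus 8 (two
- // instructions ahead), hence the kPCRegOffset correction applied below.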
- ldr(result, MemOperand(ldr_location));
- if (emit_debug_code()) {
- // Check that the instruction is an ldr reg, [pc + offset].
- and_(result, result, Operand(kLdrPCPattern));
- cmp(result, Operand(kLdrPCPattern));
- Check(eq, "The instruction to patch should be a load from pc.");
- // Result was clobbered. Restore it.
- ldr(result, MemOperand(ldr_location));
- }
- // Get the address of the constant.
- and_(result, result, Operand(kLdrOffsetMask));
- add(result, ldr_location, Operand(result));
- add(result, result, Operand(kPCRegOffset));
-}
-
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met) {
- Bfc(scratch, object, 0, kPageSizeBits);
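- // Clearing the low kPageSizeBits bits yields the start address of the
- // page (MemoryChunk) containing the object.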
- ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
- tst(scratch, Operand(mask));
- b(cc, condition_met);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- int first_bit,
- int second_bit) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- tst(ip, Operand(mask_scratch));
- b(first_bit == 1 ? eq : ne, &other_color);
- // Shift left 1 by adding.
- add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
- b(eq, &word_boundary);
- tst(ip, Operand(mask_scratch));
- b(second_bit == 1 ? ne : eq, has_color);
- jmp(&other_color);
-
- bind(&word_boundary);
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
- tst(ip, Operand(1));
- b(second_bit == 1 ? ne : eq, has_color);
- bind(&other_color);
-}
-
-
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- Label is_data_object;
- ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- b(eq, &is_data_object);
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- b(ne, not_data_object);
- bind(&is_data_object);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
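- // The next instructions compute, in order: the start of the page in
- // bitmap_reg, the bit index of the mark bit within its cell in mask_reg,
- // and the cell index within the bitmap in ip.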
- and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
- Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
- const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
- Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
- mov(ip, Operand(1));
- mov(mask_reg, Operand(ip, LSL, mask_reg));
-}
-
-
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label done;
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there, we only need to check one bit.
- ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- tst(mask_scratch, load_scratch);
- b(ne, &done);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // LSL may overflow, making the check conservative.
- tst(load_scratch, Operand(mask_scratch, LSL, 1));
- b(eq, &ok);
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
- b(eq, &is_data_object);
-
- // Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- b(ne, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's SeqString::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- tst(instance_type, Operand(kExternalStringTag));
- mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
- b(ne, &is_data_object);
-
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- ldr(ip, FieldMemOperand(value, String::kLengthOffset));
- tst(instance_type, Operand(kStringEncodingMask));
- mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
- add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, length, Operand(~kObjectAlignmentMask));
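- // The add/and pair above rounds the size up to the object alignment.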
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- orr(ip, ip, Operand(mask_scratch));
- str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- add(ip, ip, Operand(length));
- str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
-}
-
-
-void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
- Usat(output_reg, 8, Operand(input_reg));
-}
-
-
-void MacroAssembler::ClampDoubleToUint8(Register result_reg,
- DwVfpRegister input_reg,
- DwVfpRegister temp_double_reg) {
- Label above_zero;
- Label done;
- Label in_bounds;
-
- Vmov(temp_double_reg, 0.0);
- VFPCompareAndSetFlags(input_reg, temp_double_reg);
- b(gt, &above_zero);
-
- // Double value is <= 0 or NaN: return 0.
- mov(result_reg, Operand::Zero());
- b(al, &done);
-
- // If the double value is greater than 255, return 255.
- bind(&above_zero);
- Vmov(temp_double_reg, 255.0, result_reg);
- VFPCompareAndSetFlags(input_reg, temp_double_reg);
- b(le, &in_bounds);
- mov(result_reg, Operand(255));
- b(al, &done);
-
- // In 0-255 range, round and truncate.
- bind(&in_bounds);
- // Save FPSCR.
- vmrs(ip);
- // Set rounding mode to round to the nearest integer by clearing bits[23:22].
- bic(result_reg, ip, Operand(kVFPRoundingModeMask));
- vmsr(result_reg);
- vcvt_s32_f64(input_reg.low(), input_reg, kFPSCRRounding);
- vmov(result_reg, input_reg.low());
- // Restore FPSCR.
- vmsr(ip);
- bind(&done);
-}
-
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
-}
-
-
-void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
- DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
-}
-
-
-void MacroAssembler::EnumLength(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
- and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
-}
-
-
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Register empty_fixed_array_value = r6;
- LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- Label next, start;
- mov(r2, r0);
-
- // Check if the enum length field is properly initialized, indicating that
- // there is an enum cache.
- ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
-
- EnumLength(r3, r1);
- cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
- b(eq, call_runtime);
-
- jmp(&start);
-
- bind(&next);
- ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
-
- // For all objects but the receiver, check that the cache is empty.
- EnumLength(r3, r1);
- cmp(r3, Operand(Smi::FromInt(0)));
- b(ne, call_runtime);
-
- bind(&start);
-
- // Check that there are no elements. Register r2 contains the current JS
- // object we've reached through the prototype chain.
- ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
- cmp(r2, empty_fixed_array_value);
- b(ne, call_runtime);
-
- ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
- cmp(r2, null_value);
- b(ne, &next);
-}
-
-
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
- Register receiver_reg,
- Register scratch_reg) {
- Label no_info_available;
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- add(scratch_reg, receiver_reg,
- Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
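- // scratch_reg now points just past where an AllocationSiteInfo would end,
- // assuming one immediately follows the JSArray in memory.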
- cmp(scratch_reg, Operand(new_space_start));
- b(lt, &no_info_available);
- mov(ip, Operand(new_space_allocation_top));
- ldr(ip, MemOperand(ip));
- cmp(scratch_reg, ip);
- b(gt, &no_info_available);
- ldr(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
- cmp(scratch_reg,
- Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
- bind(&no_info_available);
-}
-
-
-#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
- Register reg6) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
- reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
-
- RegList regs = 0;
- if (reg1.is_valid()) regs |= reg1.bit();
- if (reg2.is_valid()) regs |= reg2.bit();
- if (reg3.is_valid()) regs |= reg3.bit();
- if (reg4.is_valid()) regs |= reg4.bit();
- if (reg5.is_valid()) regs |= reg5.bit();
- if (reg6.is_valid()) regs |= reg6.bit();
- int n_of_non_aliasing_regs = NumRegs(regs);
-
- return n_of_valid_regs != n_of_non_aliasing_regs;
-}
-#endif
-
-
-CodePatcher::CodePatcher(byte* address, int instructions)
- : address_(address),
- size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted by kGap in order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- CPU::FlushICache(address_, size_);
-
- // Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void CodePatcher::Emit(Instr instr) {
- masm()->emit(instr);
-}
-
-
-void CodePatcher::Emit(Address addr) {
- masm()->emit(reinterpret_cast<Instr>(addr));
-}
-
-
-void CodePatcher::EmitCondition(Condition cond) {
- Instr instr = Assembler::instr_at(masm_.pc_);
- instr = (instr & ~kCondMask) | cond;
- masm_.emit(instr);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/macro-assembler-arm.h b/src/3rdparty/v8/src/arm/macro-assembler-arm.h
deleted file mode 100644
index 7b05a67..0000000
--- a/src/3rdparty/v8/src/arm/macro-assembler-arm.h
+++ /dev/null
@@ -1,1439 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
-#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
-
-#include "assembler.h"
-#include "frames.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static helper functions
-
-// Generate a MemOperand for loading a field from an object.
-inline MemOperand FieldMemOperand(Register object, int offset) {
- return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-inline Operand SmiUntagOperand(Register object) {
- return Operand(object, ASR, kSmiTagSize);
-}
-
-
-
-// Give alias names to registers
-const Register cp = { 8 }; // JavaScript context pointer
-const Register kRootRegister = { 10 }; // Roots array pointer.
-
-// Flags used for AllocateHeapNumber
-enum TaggingMode {
- // Tag the result.
- TAG_RESULT,
- // Don't tag
- DONT_TAG_RESULT
-};
-
-// Flags used for the ObjectToDoubleVFPRegister function.
-enum ObjectToDoubleFlags {
- // No special flags.
- NO_OBJECT_TO_DOUBLE_FLAGS = 0,
- // Object is known to be a non smi.
- OBJECT_NOT_SMI = 1 << 0,
- // Don't load NaNs or infinities, branch to the non number case instead.
- AVOID_NANS_AND_INFINITIES = 1 << 1
-};
-
-
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
-
-
-#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg);
-#endif
-
-
-enum TargetAddressStorageMode {
- CAN_INLINE_TARGET_ADDRESS,
- NEVER_INLINE_TARGET_ADDRESS
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
- public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
-
- // Jump, Call, and Ret pseudo instructions implementing inter-working.
- void Jump(Register target, Condition cond = al);
- void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
- void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSize(Register target, Condition cond = al);
- void Call(Register target, Condition cond = al);
- int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSizeNotPredictableCodeSize(Address target,
- RelocInfo::Mode rmode,
- Condition cond = al);
- void Call(Address target, RelocInfo::Mode rmode,
- Condition cond = al,
- TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
- int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al);
- void Call(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al,
- TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
- void Ret(Condition cond = al);
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the sp register.
- void Drop(int count, Condition cond = al);
-
- void Ret(int drop, Condition cond = al);
-
- // Swap two registers. If the scratch register is omitted then a slightly
- // less efficient form using three eor (exclusive-or) instructions instead
- // of mov is emitted.
- void Swap(Register reg1,
- Register reg2,
- Register scratch = no_reg,
- Condition cond = al);
-
-
- void And(Register dst, Register src1, const Operand& src2,
- Condition cond = al);
- void Ubfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
- void Sbfx(Register dst, Register src, int lsb, int width,
- Condition cond = al);
- // The scratch register is not used for ARMv7.
- // scratch can be the same register as src (in which case it is trashed), but
- // not the same as dst.
- void Bfi(Register dst,
- Register src,
- Register scratch,
- int lsb,
- int width,
- Condition cond = al);
- void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
- void Usat(Register dst, int satpos, const Operand& src,
- Condition cond = al);
-
- void Call(Label* target);
-
- // Register move. May do nothing if the registers are identical.
- void Move(Register dst, Handle<Object> value);
- void Move(Register dst, Register src, Condition cond = al);
- void Move(DwVfpRegister dst, DwVfpRegister src);
-
- // Load an object from the root table.
- void LoadRoot(Register destination,
- Heap::RootListIndex index,
- Condition cond = al);
- // Store an object to the root table.
- void StoreRoot(Register source,
- Heap::RootListIndex index,
- Condition cond = al);
-
- void LoadHeapObject(Register dst, Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, object);
- }
- }
-
- // ---------------------------------------------------------------------------
- // GC Support
-
- void IncrementalMarkingRecordWriteHelper(Register object,
- Register value,
- Register address);
-
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, ne, branch);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch) {
- InNewSpace(object, scratch, eq, branch);
- }
-
- // Check if an object has a given incremental marking color.
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does not
- // need to be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // As above, but the offset has the tag presubtracted. For use with
- // MemOperand(reg, off).
- inline void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- lr_status,
- save_fp,
- remembered_set_action,
- smi_check);
- }
-
- // For a given |object| notify the garbage collector that the slot |address|
- // has been written. |value| is the object being stored. The value and
- // address registers are clobbered by the operation.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // Push a handle.
- void Push(Handle<Object> handle);
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
-
- // Push two registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Condition cond = al) {
- ASSERT(!src1.is(src2));
- if (src1.code() > src2.code()) {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
- } else {
- str(src1, MemOperand(sp, 4, NegPreIndex), cond);
- str(src2, MemOperand(sp, 4, NegPreIndex), cond);
- }
- }
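-
- // Illustrative note (not from the original header): stm always stores the
- // lowest-numbered register at the lowest address regardless of the order
- // written, so a single stm is only usable when the register numbers
- // already decrease left to right:
- //   Push(r1, r0);  // emits: stm db_w, sp, {r0, r1} -- r1 at higher address
- //   Push(r0, r1);  // falls back to two str instructions to keep the order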
-
- // Push three registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
- if (src1.code() > src2.code()) {
- if (src2.code() > src3.code()) {
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
- } else {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
- str(src3, MemOperand(sp, 4, NegPreIndex), cond);
- }
- } else {
- str(src1, MemOperand(sp, 4, NegPreIndex), cond);
- Push(src2, src3, cond);
- }
- }
-
- // Push four registers. Pushes leftmost register first (to highest address).
- void Push(Register src1,
- Register src2,
- Register src3,
- Register src4,
- Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
- ASSERT(!src1.is(src4));
- ASSERT(!src2.is(src4));
- ASSERT(!src3.is(src4));
- if (src1.code() > src2.code()) {
- if (src2.code() > src3.code()) {
- if (src3.code() > src4.code()) {
- stm(db_w,
- sp,
- src1.bit() | src2.bit() | src3.bit() | src4.bit(),
- cond);
- } else {
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
- str(src4, MemOperand(sp, 4, NegPreIndex), cond);
- }
- } else {
- stm(db_w, sp, src1.bit() | src2.bit(), cond);
- Push(src3, src4, cond);
- }
- } else {
- str(src1, MemOperand(sp, 4, NegPreIndex), cond);
- Push(src2, src3, src4, cond);
- }
- }
-
- // Pop two registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Condition cond = al) {
- ASSERT(!src1.is(src2));
- if (src1.code() > src2.code()) {
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
- } else {
- ldr(src2, MemOperand(sp, 4, PostIndex), cond);
- ldr(src1, MemOperand(sp, 4, PostIndex), cond);
- }
- }
-
- // Pop three registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
- if (src1.code() > src2.code()) {
- if (src2.code() > src3.code()) {
- ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
- } else {
- ldr(src3, MemOperand(sp, 4, PostIndex), cond);
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
- }
- } else {
- Pop(src2, src3, cond);
- ldr(src1, MemOperand(sp, 4, PostIndex), cond);  // Load, not store: a pop.
- }
- }
-
- // Pop four registers. Pops rightmost register first (from lower address).
- void Pop(Register src1,
- Register src2,
- Register src3,
- Register src4,
- Condition cond = al) {
- ASSERT(!src1.is(src2));
- ASSERT(!src2.is(src3));
- ASSERT(!src1.is(src3));
- ASSERT(!src1.is(src4));
- ASSERT(!src2.is(src4));
- ASSERT(!src3.is(src4));
- if (src1.code() > src2.code()) {
- if (src2.code() > src3.code()) {
- if (src3.code() > src4.code()) {
- ldm(ia_w,
- sp,
- src1.bit() | src2.bit() | src3.bit() | src4.bit(),
- cond);
- } else {
- ldr(src4, MemOperand(sp, 4, PostIndex), cond);
- ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
- }
- } else {
- Pop(src3, src4, cond);
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
- }
- } else {
- Pop(src2, src3, src4, cond);
- ldr(src1, MemOperand(sp, 4, PostIndex), cond);
- }
- }
-
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
- void PushSafepointRegistersAndDoubles();
- void PopSafepointRegistersAndDoubles();
- // Store value in register src in the safepoint stack slot for
- // register dst.
- void StoreToSafepointRegisterSlot(Register src, Register dst);
- void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
- // Load the value of the src register from its safepoint stack slot
- // into register dst.
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
-
- // Load two consecutive registers with two consecutive memory locations.
- void Ldrd(Register dst1,
- Register dst2,
- const MemOperand& src,
- Condition cond = al);
-
- // Store two consecutive registers to two consecutive memory locations.
- void Strd(Register src1,
- Register src2,
- const MemOperand& dst,
- Condition cond = al);
-
- // Compare double values and move the result to the normal condition flags.
- void VFPCompareAndSetFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
- void VFPCompareAndSetFlags(const DwVfpRegister src1,
- const double src2,
- const Condition cond = al);
-
- // Compare double values and then load the fpscr flags to a register.
- void VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Register fpscr_flags,
- const Condition cond = al);
- void VFPCompareAndLoadFlags(const DwVfpRegister src1,
- const double src2,
- const Register fpscr_flags,
- const Condition cond = al);
-
- void Vmov(const DwVfpRegister dst,
- const double imm,
- const Register scratch = no_reg);
-
- // Enter exit frame.
- // stack_space - extra stack space, used for alignment before call to C.
- void EnterExitFrame(bool save_doubles, int stack_space = 0);
-
- // Leave the current exit frame. Expects the return value in r0.
- // Expect the number of values, pushed prior to the exit frame, to
- // remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles, Register argument_count);
-
- // Get the actual activation frame alignment for target environment.
- static int ActivationFrameAlignment();
-
- void LoadContext(Register dst, int context_chain_length);
-
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
- void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function,
- Register map,
- Register scratch);
-
- void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(kRootRegister, Operand(roots_array_start));
- }
-
- // ---------------------------------------------------------------------------
- // JavaScript invokes
-
- // Set up call kind marking in the dst register. The method takes dst as
- // an explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- CallKind call_kind);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- void IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail);
-
- void IsObjectJSStringType(Register object,
- Register scratch,
- Label* fail);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
-#endif
-
- // ---------------------------------------------------------------------------
- // Exception handling
-
- // Push a new try handler and link into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
-
- // Unlink the stack handler on top of the stack from the try handler chain.
- // Must preserve the result register.
- void PopTryHandler();
-
- // Passes thrown value to the handler of top of the try handler chain.
- void Throw(Register value);
-
- // Propagates an uncatchable exception to the top of the current JS stack's
- // handler chain.
- void ThrowUncatchable(Register value);
-
- // ---------------------------------------------------------------------------
- // Inline caching support
-
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas the scratch register is clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
- void GetNumberHash(Register t0, Register scratch);
-
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register t0,
- Register t1,
- Register t2);
-
-
- inline void MarkCode(NopMarkerTypes type) {
- nop(type);
- }
-
- // Check if the given instruction is a 'type' marker.
- // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type)).
- // These instructions are generated to mark special location in the code,
- // like some special IC code.
- static inline bool IsMarkedCode(Instr instr, int type) {
- ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
- return IsNop(instr, type);
- }
-
-
- static inline int GetCodeMarker(Instr instr) {
- int dst_reg_offset = 12;
- int dst_mask = 0xf << dst_reg_offset;
- int src_mask = 0xf;
- int dst_reg = (instr & dst_mask) >> dst_reg_offset;
- int src_reg = instr & src_mask;
- uint32_t non_register_mask = ~(dst_mask | src_mask);
- uint32_t mov_mask = al | 13 << 21;
-
- // Return <n> if we have a mov rn, rn, else return -1.
- int type = ((instr & non_register_mask) == mov_mask) &&
- (dst_reg == src_reg) &&
- (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
- ? src_reg
- : -1;
- ASSERT((type == -1) ||
- ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
- return type;
- }
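-
- // Worked example (illustrative, not from the original source): for
- // type == 2 the marker nop(2) assembles to "mov r2, r2", encoded as
- // 0xE1A02002 (cond al, opcode mov == 13 << 21, Rd == Rm == 2), so
- // GetCodeMarker extracts dst_reg == src_reg == 2 and returns 2.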
-
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space. The object_size is specified
- // either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the new space is exhausted control continues at the
- // gc_required label. The allocated object is returned in result. If
- // the flag tag_allocated_object is true the result is tagged as
- // a heap object. All registers are clobbered also when control
- // continues at the gc_required label.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
-
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. The caller must make sure that no pointers
- // are left to the object(s) that are no longer allocated, as they would be
- // invalid once the allocation is undone.
- void UndoAllocationInNewSpace(Register object, Register scratch);
-
-
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocates a heap number or jumps to the gc_required label if the young
- // space is full and a scavenge is needed. All registers are clobbered also
- // when control continues at the gc_required label.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT);
- void AllocateHeapNumberWithValue(Register result,
- DwVfpRegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- Label* gc_required);
-
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
-
- // Copies a number of bytes from src to dst. All registers are clobbered. On
- // exit src and dst will point to the place just after where the last byte was
- // read or written and length will be zero.
- void CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch);
-
- // Initialize fields with filler values. Fields starting at |start_offset|
- // up to but not including |end_offset| are overwritten with the value in
- // |filler|. At the end of the loop, |start_offset| takes the value of
- // |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
-
- // ---------------------------------------------------------------------------
- // Support functions.
-
- // Try to get function prototype of a function and puts the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function = false);
-
- // Compare object type for heap object. heap_object contains a non-Smi
- // whose object type should be compared with the given type. This both
- // sets the flags and leaves the object type in the type_reg register.
- // It leaves the map in the map register (unless the type_reg and map register
- // are the same register). It leaves the heap object in the heap_object
- // register unless the heap_object register is the same register as one of the
- // other registers.
- void CompareObjectType(Register heap_object,
- Register map,
- Register type_reg,
- InstanceType type);
-
- // Compare instance type in a map. map contains a valid map object whose
- // object type should be compared with the given type. This both
- // sets the flags and leaves the object type in the type_reg register.
- void CompareInstanceType(Register map,
- Register type_reg,
- InstanceType type);
-
-
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail, in which
- // case scratch2, scratch3 and scratch4 are unmodified.
- void StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- // All regs below here overwritten.
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* fail,
- int elements_offset = 0);
-
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
- // set with result of map compare. If multiple map compares are required, the
- // compare sequence branches to early_success.
- void CompareMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // As above, but the map of the object is already loaded into the register
- // which is preserved by the code generated.
- void CompareMap(Register obj_map,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
-
- void CheckMap(Register obj,
- Register scratch,
- Heap::RootListIndex index,
- Label* fail,
- SmiCheckType smi_check_type);
-
-
- // Check if the map of an object is equal to a specified map and branch to a
- // specified target if equal. Skip the smi check if not required (object is
- // known to be a heap object)
- void DispatchMap(Register obj,
- Register scratch,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type);
-
-
- // Compare the object in a register to a value from the root list.
- // Uses the ip register as scratch.
- void CompareRoot(Register obj, Heap::RootListIndex index);
-
-
- // Load and check the instance type of an object for being a string.
- // Loads the type into the second argument register.
- // Returns a condition that will be enabled if the object was a string
- // and the passed-in condition passed. If the passed-in condition failed
- // then flags remain unchanged.
- Condition IsObjectStringType(Register obj,
- Register type,
- Condition cond = al) {
- ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
- ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
- tst(type, Operand(kIsNotStringMask), cond);
- ASSERT_EQ(0, kStringTag);
- return eq;
- }
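-
- // Illustrative use (assumed, not from the original header):
- //   b(IsObjectStringType(receiver, scratch), &is_string);
- // emits the type check and then branches only if receiver held a string.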
-
-
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
- // Get the number of least significant bits from a register
- void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
- void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
-
- // Uses VFP instructions to convert a Smi to a double.
- void IntegerToDoubleConversionWithVFP3(Register inReg,
- Register outHighReg,
- Register outLowReg);
-
- // Load the value of a number object into a VFP double register. If the object
- // is not a number a jump to the label not_number is performed and the VFP
- // double register is unchanged.
- void ObjectToDoubleVFPRegister(
- Register object,
- DwVfpRegister value,
- Register scratch1,
- Register scratch2,
- Register heap_number_map,
- SwVfpRegister scratch3,
- Label* not_number,
- ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
-
- // Load the value of a smi object into a VFP double register. The register
- // scratch1 can be the same register as smi in which case smi will hold the
- // untagged value afterwards.
- void SmiToDoubleVFPRegister(Register smi,
- DwVfpRegister value,
- Register scratch1,
- SwVfpRegister scratch2);
-
- // Convert the HeapNumber pointed to by source to a 32-bit signed integer
- // dest. If the HeapNumber does not fit into a 32-bit signed integer,
- // branch to the not_int32 label. If VFP3 is available, double_scratch is
- // used but not scratch2.
- void ConvertToInt32(Register source,
- Register dest,
- Register scratch,
- Register scratch2,
- DwVfpRegister double_scratch,
- Label *not_int32);
-
- // Try to convert a double to a signed 32-bit integer. If the double value
- // can be exactly represented as an integer, the code jumps to 'done' and
- // 'result' contains the integer value. Otherwise, the code falls through.
- void TryFastDoubleToInt32(Register result,
- DwVfpRegister double_input,
- DwVfpRegister double_scratch,
- Label* done);
-
- // Truncates a double using a specific rounding mode, and writes the value
- // to the result register.
- // Clears the z flag (ne condition) if an overflow occurs.
- // If kCheckForInexactConversion is passed, the z flag is also cleared if the
- // conversion was inexact, i.e. if the double value could not be converted
- // exactly to a 32-bit integer.
- void EmitVFPTruncate(VFPRoundingMode rounding_mode,
- Register result,
- DwVfpRegister double_input,
- Register scratch,
- DwVfpRegister double_scratch,
- CheckForInexactConversion check
- = kDontCheckForInexactConversion);
-
- // Helper for EmitECMATruncate.
- // This will truncate a floating-point value outside of the signed 32-bit
- // integer range to a 32-bit signed integer.
- // Expects the double value loaded in input_high and input_low.
- // Exits with the answer in 'result'.
- // Note that this code does not work for values inside the 32-bit range!
- void EmitOutOfInt32RangeTruncate(Register result,
- Register input_high,
- Register input_low,
- Register scratch);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer and all other registers clobbered.
- void EmitECMATruncate(Register result,
- DwVfpRegister double_input,
- DwVfpRegister double_scratch,
- Register scratch,
- Register scratch2,
- Register scratch3);
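-
- // Worked example (illustrative): ToInt32 reduces the value modulo 2^32 and
- // reinterprets it as signed, so 4294967301.0 (2^32 + 5) truncates to 5 and
- // 2147483648.0 (2^31) becomes -2147483648.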
-
- // Count leading zeros in a 32-bit word. On ARMv5 and later it uses the clz
- // instruction. On pre-ARMv5 hardware this routine gives the wrong answer
- // for 0 (31 instead of 32). Source and scratch can be the same, in which
- // case the source is clobbered. Source and zeros can also be the same, in
- // which case scratch should be a different register.
- void CountLeadingZeros(Register zeros,
- Register source,
- Register scratch);
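-
- // Example (illustrative): for source == 0x00800000 the highest set bit is
- // bit 23, so zeros receives 31 - 23 == 8.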
-
- // Check whether d16-d31 are available on the CPU. The result is given by the
- // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
- void CheckFor32DRegs(Register scratch);
-
-
- // ---------------------------------------------------------------------------
- // Runtime calls
-
- // Call a code stub.
- void CallStub(CodeStub* stub,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al);
-
- // Call a code stub.
- void TailCallStub(CodeStub* stub, Condition cond = al);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId fid, int num_arguments);
-
- // Convenience function: call an external reference.
- void CallExternalReference(const ExternalReference& ext,
- int num_arguments);
-
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
-
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, non-register arguments must be stored in
- // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
- // are word sized. If double arguments are used, this function assumes that
- // all double arguments are stored before core registers; otherwise the
- // correct alignment of the double values is not guaranteed.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_reg_arguments,
- int num_double_registers,
- Register scratch);
- void PrepareCallCFunction(int num_reg_arguments,
- Register scratch);
-
- // There are two ways of passing double arguments on ARM, depending on
- // whether soft or hard floating point ABI is used. These functions
- // abstract parameter passing for the three different ways we call
- // C functions from generated code.
- void SetCallCDoubleArguments(DwVfpRegister dreg);
- void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
- void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function,
- int num_reg_arguments,
- int num_double_arguments);
-
- void GetCFunctionDoubleResult(const DwVfpRegister dst);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Restores context. stack_space
- // - space to be unwound on exit (includes the call JS arguments space and
- // the additional space allocated for the fast call).
- void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& builtin);
-
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
- // Store the code object for the given builtin in the target register and
- // set up the function in r1.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- Handle<Object> CodeObject() {
- ASSERT(!code_object_.is_null());
- return code_object_;
- }
-
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
-
-
- // ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cond is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cond, const char* msg);
- void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
- void AssertFastElements(Register elements);
-
- // Like Assert(), but always enabled.
- void Check(Condition cond, const char* msg);
-
- // Print a message to stdout and abort execution.
- void Abort(const char* msg);
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
- // EABI variant for double arguments in use.
- bool use_eabi_hardfloat() {
-#if USE_EABI_HARDFLOAT
- return true;
-#else
- return false;
-#endif
- }
-
- // ---------------------------------------------------------------------------
- // Number utilities
-
- // Check whether the value of reg is a power of two and not zero. If not
- // control continues at the label not_power_of_two. If reg is a power of two
- // the register scratch contains the value of (reg - 1) when control falls
- // through.
- void JumpIfNotPowerOfTwoOrZero(Register reg,
- Register scratch,
- Label* not_power_of_two_or_zero);
- // Check whether the value of reg is a power of two and not zero.
- // Control falls through if it is, with scratch containing the mask
- // value (reg - 1).
- // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
- // zero or negative, or jumps to the 'not_power_of_two' label if the value is
- // strictly positive but not a power of two.
- void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
- Register scratch,
- Label* zero_and_neg,
- Label* not_power_of_two);
-
- // ---------------------------------------------------------------------------
- // Smi utilities
-
- void SmiTag(Register reg, SBit s = LeaveCC) {
- add(reg, reg, Operand(reg), s);
- }
- void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
- add(dst, src, Operand(src), s);
- }
-
- // Try to convert int32 to smi. If the value is too large, preserve
- // the original value and jump to not_a_smi. Destroys scratch and
- // sets flags.
- void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
- mov(scratch, reg);
- SmiTag(scratch, SetCC);
- b(vs, not_a_smi);
- mov(reg, scratch);
- }
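-
- // Worked example (illustrative): tagging doubles the value, so for
- // reg == 0x40000000 the add produces 0x80000000 with the V flag set and
- // the b(vs, ...) above takes the not_a_smi path, leaving reg unchanged.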
-
- void SmiUntag(Register reg, SBit s = LeaveCC) {
- mov(reg, Operand(reg, ASR, kSmiTagSize), s);
- }
- void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
- mov(dst, Operand(src, ASR, kSmiTagSize), s);
- }
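-
- // Illustrative arithmetic (assuming kSmiTagSize == 1 and kSmiTag == 0):
- // the integer 5 tags to 10 (binary 1010; a clear low bit marks a smi) and
- // ASR by 1 recovers 5. Negatives survive because ASR is arithmetic.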
-
- // Untag the source value into destination and jump if source is a smi.
- // Source and destination can be the same register.
- void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
-
- // Untag the source value into destination and jump if source is not a smi.
- // Source and destination can be the same register.
- void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
- // Jump if the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label) {
- tst(value, Operand(kSmiTagMask));
- b(eq, smi_label);
- }
- // Jump if the register contains a non-smi.
- inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
- tst(value, Operand(kSmiTagMask));
- b(ne, not_smi_label);
- }
- // Jump if either of the registers contains a non-smi.
- void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
- // Jump if either of the registers contains a smi.
- void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
-
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
-
- // Abort execution if argument is not a string, enabled via --debug-code.
- void AssertString(Register object);
-
- // Abort execution if argument is not the root value with the given index,
- // enabled via --debug-code.
- void AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
-
- // ---------------------------------------------------------------------------
- // HeapNumber utilities
-
- void JumpIfNotHeapNumber(Register object,
- Register heap_number_map,
- Register scratch,
- Label* on_not_heap_number);
-
- // ---------------------------------------------------------------------------
- // String utilities
-
- // Checks if both objects are sequential ASCII strings and jumps to label
- // if either is not. Assumes that neither object is a smi.
- void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Checks if both objects are sequential ASCII strings and jumps to label
- // if either is not.
- void JumpIfNotBothSequentialAsciiStrings(Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* not_flat_ascii_strings);
-
- // Checks if both instance types are sequential ASCII strings and jumps to
- // label if either is not.
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* failure);
-
- // Check if instance type is sequential ASCII string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure);
-
-
- // ---------------------------------------------------------------------------
- // Patching helpers.
-
- // Get the location of a relocated constant (its address in the constant pool)
- // from its load site.
- void GetRelocatedValueLocation(Register ldr_location,
- Register result);
-
-
- void ClampUint8(Register output_reg, Register input_reg);
-
- void ClampDoubleToUint8(Register result_reg,
- DwVfpRegister input_reg,
- DwVfpRegister temp_double_reg);
-
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void EnumLength(Register dst, Register map);
- void NumberOfOwnDescriptors(Register dst, Register map);
-
- template<typename Field>
- void DecodeField(Register reg) {
- static const int shift = Field::kShift;
- static const int mask = (Field::kMask >> shift) << kSmiTagSize;
- mov(reg, Operand(reg, LSR, shift));
- and_(reg, reg, Operand(mask));
- }
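-
- // Illustrative walk-through (hypothetical Field with kShift == 3 and a
- // 4-bit kMask, reg holding a smi-tagged word): LSR by 3 leaves the field
- // in bits 1..4 and the mask (0xF << kSmiTagSize) isolates it, so the
- // decoded field value comes out still smi-tagged.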
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- // Expects object in r0 and returns map with validated enum cache
- // in r0. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value, Label* call_runtime);
-
- // AllocationSiteInfo support. Arrays may have an associated
- // AllocationSiteInfo object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver_reg should point to the array object.
- // scratch_reg gets clobbered.
- // If allocation info is present, condition flags are set to eq
- void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
- Register scratch_reg);
-
- private:
- void CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments);
-
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-
- // Helper functions for generating invokes.
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InitializeNewString(Register string,
- Register length,
- Heap::RootListIndex map_index,
- Register scratch1,
- Register scratch2);
-
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise.
- Label* branch);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // register holds a mask selecting the first bit. Leaves addr_reg unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
- // Compute memory operands for safepoint stack slots.
- static int SafepointRegisterStackIndex(int reg_code);
- MemOperand SafepointRegisterSlot(Register reg);
- MemOperand SafepointRegistersAndDoublesSlot(Register reg);
-
- bool generating_stub_;
- bool allow_stub_calls_;
- bool has_frame_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class StandardFrame;
-};
-
-
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to emit
-// relocation information. If any of these constraints are violated it causes
-// an assertion to fail.
-class CodePatcher {
- public:
- CodePatcher(byte* address, int instructions);
- virtual ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- // Emit an instruction directly.
- void Emit(Instr instr);
-
- // Emit an address directly.
- void Emit(Address addr);
-
- // Emit the condition part of an instruction leaving the rest of the current
- // instruction unchanged.
- void EmitCondition(Condition cond);
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-inline MemOperand ContextOperand(Register context, int index) {
- return MemOperand(context, Context::SlotOffset(index));
-}
-
-
-inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
-}
-
-
-static inline MemOperand QmlGlobalObjectOperand() {
- return ContextOperand(cp, Context::QML_GLOBAL_OBJECT_INDEX);
-}
-
-
-#ifdef GENERATED_CODE_COVERAGE
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
-#else
-#define ACCESS_MASM(masm) masm->
-#endif
-
-
-} } // namespace v8::internal
-
-#endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc b/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc
deleted file mode 100644
index acb24ef..0000000
--- a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc
+++ /dev/null
@@ -1,1429 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "unicode.h"
-#include "log.h"
-#include "code-stubs.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "arm/regexp-macro-assembler-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention
- * - r4 : Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
- * - r5 : Pointer to current code object (Code*) including heap object tag.
- * - r6 : Current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character offset!
- * - r7 : Currently loaded character. Must be loaded using
- * LoadCurrentCharacter before using any of the dispatch methods.
- * - r8 : Points to tip of backtrack stack
- * - r9 : Unused, might be used by C code and expected unchanged.
- * - r10 : End of input (points to byte after last character in input).
- * - r11 : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - r12 : IP register, used by assembler. Very volatile.
- * - r13/sp : Points to tip of C stack.
- *
- * The remaining registers are free for computations.
- * Each call to a public method should retain this convention.
- *
- * The stack will have the following structure:
- * - fp[56] Isolate* isolate (address of the current isolate)
- * - fp[52] direct_call (if 1, direct call from JavaScript code,
- * if 0, call through the runtime system).
- * - fp[48] stack_area_base (high end of the memory area to use as
- * backtracking stack).
- * - fp[44] capture array size (may fit multiple sets of matches)
- * - fp[40] int* capture_array (int[num_saved_registers_], for output).
- * - fp[36] secondary link/return address used by native call.
- * --- sp when called ---
- * - fp[32] return address (lr).
- * - fp[28] old frame pointer (r11).
- * - fp[0..24] backup of registers r4..r10.
- * --- frame pointer ----
- * - fp[-4] end of input (address of end of string).
- * - fp[-8] start of input (address of first character in string).
- * - fp[-12] start index (character index of start).
- * - fp[-16] void* input_string (location of a handle containing the string).
- * - fp[-20] success counter (only for global regexps to count matches).
- * - fp[-24] Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a
- * non-position.
- * - fp[-28] At start (if 1, we are starting at the start of the
- * string, otherwise 0)
- * - fp[-32] register 0 (Only positions must be stored in the first
- * - register 1 num_saved_registers_ registers)
- * - ...
- * - register num_registers-1
- * --- sp ---
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers start out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code and the remaining arguments are passed in registers, e.g. by calling the
- * code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * Address secondary_return_address, // Only used by native call.
- * int* capture_output_array,
- * byte* stack_area_base,
- * bool direct_call = false)
- * The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
- * in arm/simulator-arm.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the LR register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
- */
-
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- ASSERT_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code later.
- EmitBacktrackConstantPool();
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {
- delete masm_;
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerARM::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ add(current_input_offset(),
- current_input_offset(), Operand(by * char_size()));
- }
-}
-
-
-void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) {
- ASSERT(reg >= 0);
- ASSERT(reg < num_registers_);
- if (by != 0) {
- __ ldr(r0, register_location(reg));
- __ add(r0, r0, Operand(by));
- __ str(r0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerARM::Backtrack() {
- CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
- Pop(r0);
- __ add(pc, r0, Operand(code_pointer()));
-}
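-
-// Illustrative note (not from the original source): backtrack targets are
-// pushed as offsets from the code object rather than absolute addresses, so
-// they stay valid if the GC moves the code; adding code_pointer() (r5, the
-// current code object pointer) rebases the offset into a real pc before the
-// jump.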
-
-
-void RegExpMacroAssemblerARM::Bind(Label* label) {
- __ bind(label);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacter(uint32_t c, Label* on_equal) {
- __ cmp(current_character(), Operand(c));
- BranchOrBacktrack(eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
- __ cmp(current_character(), Operand(limit));
- BranchOrBacktrack(gt, on_greater);
-}
-
-
-void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, &not_at_start);
-
- // If we did, are we still at the start of the input?
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- __ cmp(r0, r1);
- BranchOrBacktrack(eq, on_at_start);
- __ bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- __ cmp(r0, r1);
- BranchOrBacktrack(ne, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterLT(uc16 limit, Label* on_less) {
- __ cmp(current_character(), Operand(limit));
- BranchOrBacktrack(lt, on_less);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
- if (on_failure == NULL) {
- // Instead of inlining a backtrack for each test, (re)use the global
- // backtrack target.
- on_failure = &backtrack_label_;
- }
-
- if (check_end_of_string) {
- // Is the last character of the required match inside the string?
- CheckPosition(cp_offset + str.length() - 1, on_failure);
- }
-
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
- if (cp_offset != 0) {
- int byte_offset = cp_offset * char_size();
- __ add(r0, r0, Operand(byte_offset));
- }
-
- // r0 : Address of characters to match against str.
- int stored_high_byte = 0;
- for (int i = 0; i < str.length(); i++) {
- if (mode_ == ASCII) {
- __ ldrb(r1, MemOperand(r0, char_size(), PostIndex));
- ASSERT(str[i] <= String::kMaxOneByteCharCode);
- __ cmp(r1, Operand(str[i]));
- } else {
- __ ldrh(r1, MemOperand(r0, char_size(), PostIndex));
- uc16 match_char = str[i];
- int match_high_byte = (match_char >> 8);
- if (match_high_byte == 0) {
- __ cmp(r1, Operand(str[i]));
- } else {
- if (match_high_byte != stored_high_byte) {
- __ mov(r2, Operand(match_high_byte));
- stored_high_byte = match_high_byte;
- }
- __ add(r3, r2, Operand(match_char & 0xff));
- __ cmp(r1, r3);
- }
- }
- BranchOrBacktrack(ne, on_failure);
- }
-}
-
-
-void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
- __ ldr(r0, MemOperand(backtrack_stackpointer(), 0));
- __ cmp(current_input_offset(), r0);
- __ add(backtrack_stackpointer(),
- backtrack_stackpointer(), Operand(kPointerSize), LeaveCC, eq);
- BranchOrBacktrack(eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- __ ldr(r0, register_location(start_reg)); // Index of start of capture
- __ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
- __ sub(r1, r1, r0, SetCC); // Length of capture.
-
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
- __ b(eq, &fallthrough);
-
- // Check that there are enough characters left in the input.
- __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match);
-
- if (mode_ == ASCII) {
- Label success;
- Label fail;
- Label loop_check;
-
- // r0 - offset of start of capture
- // r1 - length of capture
- __ add(r0, r0, Operand(end_of_input_address()));
- __ add(r2, end_of_input_address(), Operand(current_input_offset()));
- __ add(r1, r0, Operand(r1));
-
- // r0 - Address of start of capture.
- // r1 - Address of end of capture
- // r2 - Address of current input position.
-
- Label loop;
- __ bind(&loop);
- __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
- __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
- __ cmp(r4, r3);
- __ b(eq, &loop_check);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- __ orr(r3, r3, Operand(0x20)); // Convert capture character to lower-case.
- __ orr(r4, r4, Operand(0x20)); // Also convert input character.
- __ cmp(r4, r3);
- __ b(ne, &fail);
- __ sub(r3, r3, Operand('a'));
- __ cmp(r3, Operand('z' - 'a')); // Is r3 a lowercase letter?
-#ifndef ENABLE_LATIN_1
- __ b(hi, &fail);
-#else
- __ b(ls, &loop_check); // In range 'a'-'z'.
- // Latin-1: Check for values in range [224,254] but not 247.
- __ sub(r3, r3, Operand(224 - 'a'));
- __ cmp(r3, Operand(254 - 224));
- __ b(hi, &fail); // Weren't Latin-1 letters.
- __ cmp(r3, Operand(247 - 224)); // Check for 247.
- __ b(eq, &fail);
-#endif
-
- __ bind(&loop_check);
- __ cmp(r0, r1);
- __ b(lt, &loop);
- __ jmp(&success);
-
- __ bind(&fail);
- BranchOrBacktrack(al, on_no_match);
-
- __ bind(&success);
- // Compute new value of character position after the matched part.
- __ sub(current_input_offset(), r2, end_of_input_address());
- } else {
- ASSERT(mode_ == UC16);
- int argument_count = 4;
- __ PrepareCallCFunction(argument_count, r2);
-
- // r0 - offset of start of capture
- // r1 - length of capture
-
- // Put arguments into arguments registers.
- // Parameters are
- // r0: Address byte_offset1 - Address captured substring's start.
- // r1: Address byte_offset2 - Address of current character position.
- // r2: size_t byte_length - length of capture in bytes(!)
- // r3: Isolate* isolate
-
- // Address of start of capture.
- __ add(r0, r0, Operand(end_of_input_address()));
- // Length of capture.
- __ mov(r2, Operand(r1));
- // Save length in callee-save register for use on return.
- __ mov(r4, Operand(r1));
- // Address of current input position.
- __ add(r1, current_input_offset(), Operand(end_of_input_address()));
- // Isolate.
- __ mov(r3, Operand(ExternalReference::isolate_address()));
-
- {
- AllowExternalCallThatCantCauseGC scope(masm_);
- ExternalReference function =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(function, argument_count);
- }
-
- // Check if function returned non-zero for success or zero for failure.
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(eq, on_no_match);
- // On success, increment position by length of capture.
- __ add(current_input_offset(), current_input_offset(), Operand(r4));
- }
-
- __ bind(&fallthrough);
-}
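-
-// The ASCII path relies on ORing with 0x20 mapping 'A'..'Z' onto 'a'..'z';
-// a folded match only counts if the folded value really is a letter. A
-// hypothetical C++ sketch of the per-character test, ignoring the Latin-1
-// extension guarded by ENABLE_LATIN_1:
-static bool AsciiCharsEqualIgnoreCase(uint8_t capture_char, uint8_t input_char) {
-if (capture_char == input_char) return true;
-uint8_t folded = capture_char | 0x20; // Lower-case the capture character.
-if (folded != (input_char | 0x20)) return false;
-return folded >= 'a' && folded <= 'z'; // Only letters may differ by case.
-}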
-
-
-void RegExpMacroAssemblerARM::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- Label success;
-
- // Find length of back-referenced capture.
- __ ldr(r0, register_location(start_reg));
- __ ldr(r1, register_location(start_reg + 1));
- __ sub(r1, r1, r0, SetCC); // Length to check.
- // Succeed on empty capture (including no capture).
- __ b(eq, &fallthrough);
-
- // Check that there are enough characters left in the input.
- __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match);
-
- // Compute pointers to match string and capture string
- __ add(r0, r0, Operand(end_of_input_address()));
- __ add(r2, end_of_input_address(), Operand(current_input_offset()));
- __ add(r1, r1, Operand(r0));
-
- Label loop;
- __ bind(&loop);
- if (mode_ == ASCII) {
- __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
- __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
- } else {
- ASSERT(mode_ == UC16);
- __ ldrh(r3, MemOperand(r0, char_size(), PostIndex));
- __ ldrh(r4, MemOperand(r2, char_size(), PostIndex));
- }
- __ cmp(r3, r4);
- BranchOrBacktrack(ne, on_no_match);
- __ cmp(r0, r1);
- __ b(lt, &loop);
-
- // Move current character position to position after match.
- __ sub(current_input_offset(), r2, end_of_input_address());
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
- Label* on_not_equal) {
- __ cmp(current_character(), Operand(c));
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- if (c == 0) {
- __ tst(current_character(), Operand(mask));
- } else {
- __ and_(r0, current_character(), Operand(mask));
- __ cmp(r0, Operand(c));
- }
- BranchOrBacktrack(eq, on_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal) {
- if (c == 0) {
- __ tst(current_character(), Operand(mask));
- } else {
- __ and_(r0, current_character(), Operand(mask));
- __ cmp(r0, Operand(c));
- }
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ sub(r0, current_character(), Operand(minus));
- __ and_(r0, r0, Operand(mask));
- __ cmp(r0, Operand(c));
- BranchOrBacktrack(ne, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- __ sub(r0, current_character(), Operand(from));
- __ cmp(r0, Operand(to - from));
- BranchOrBacktrack(ls, on_in_range); // Unsigned lower-or-same condition.
-}
-
-
-void RegExpMacroAssemblerARM::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- __ sub(r0, current_character(), Operand(from));
- __ cmp(r0, Operand(to - from));
- BranchOrBacktrack(hi, on_not_in_range); // Unsigned higher condition.
-}
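-
-// Both range checks use the classic single-comparison idiom: with unsigned
-// arithmetic, (c - from) <= (to - from) holds exactly when from <= c <= to,
-// so one sub and one cmp replace two comparisons. A hypothetical C++
-// equivalent, assuming from <= to:
-static bool CharInRange(uint16_t c, uint16_t from, uint16_t to) {
-return static_cast<uint16_t>(c - from) <= static_cast<uint16_t>(to - from);
-}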
-
-
-void RegExpMacroAssemblerARM::CheckBitInTable(
- Handle<ByteArray> table,
- Label* on_bit_set) {
- __ mov(r0, Operand(table));
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
- __ and_(r1, current_character(), Operand(kTableSize - 1));
- __ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ add(r1,
- current_character(),
- Operand(ByteArray::kHeaderSize - kHeapObjectTag));
- }
- __ ldrb(r0, MemOperand(r0, r1));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, on_bit_set);
-}
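-
-// Despite the name, the table stores one byte per entry: the character is
-// masked down to the table size and the addressed byte is tested for
-// non-zero. A hypothetical C++ equivalent, assuming table_size is a power
-// of two:
-static bool CharBitInTable(const uint8_t* table, uint16_t c, int table_size) {
-return table[c & (table_size - 1)] != 0;
-}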
-
-
-bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check.
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- Label success;
- __ cmp(current_character(), Operand(' '));
- __ b(eq, &success);
- // Check range 0x09..0x0d
- __ sub(r0, current_character(), Operand('\t'));
- __ cmp(r0, Operand('\r' - '\t'));
- BranchOrBacktrack(hi, on_no_match);
- __ bind(&success);
- return true;
- }
- return false;
- case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmp(current_character(), Operand(' '));
- BranchOrBacktrack(eq, on_no_match);
- __ sub(r0, current_character(), Operand('\t'));
- __ cmp(r0, Operand('\r' - '\t'));
- BranchOrBacktrack(ls, on_no_match);
- return true;
- }
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9')
- __ sub(r0, current_character(), Operand('0'));
- __ cmp(r0, Operand('9' - '0'));
- BranchOrBacktrack(hi, on_no_match);
- return true;
- case 'D':
- // Match non-digits (anything that is not an ASCII digit).
- __ sub(r0, current_character(), Operand('0'));
- __ cmp(r0, Operand('9' - '0'));
- BranchOrBacktrack(ls, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
- BranchOrBacktrack(ls, on_no_match);
- if (mode_ == UC16) {
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
- __ cmp(r0, Operand(1));
- BranchOrBacktrack(ls, on_no_match);
- }
- return true;
- }
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ eor(r0, current_character(), Operand(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(r0, r0, Operand(0x0b));
- __ cmp(r0, Operand(0x0c - 0x0b));
- if (mode_ == ASCII) {
- BranchOrBacktrack(hi, on_no_match);
- } else {
- Label done;
- __ b(ls, &done);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(r0, r0, Operand(0x2028 - 0x0b));
- __ cmp(r0, Operand(1));
- BranchOrBacktrack(hi, on_no_match);
- __ bind(&done);
- }
- return true;
- }
- case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(current_character(), Operand('z'));
- BranchOrBacktrack(hi, on_no_match);
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ mov(r0, Operand(map));
- __ ldrb(r0, MemOperand(r0, current_character()));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(eq, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(current_character(), Operand('z'));
- __ b(hi, &done);
- }
- ExternalReference map = ExternalReference::re_word_character_map();
- __ mov(r0, Operand(map));
- __ ldrb(r0, MemOperand(r0, current_character()));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, on_no_match);
- if (mode_ != ASCII) {
- __ bind(&done);
- }
- return true;
- }
- case '*':
- // Match any character.
- return true;
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
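-
-// The '.' and 'n' classes above exploit the fact that XORing with 0x01 maps
-// '\n' (0x0a) and '\r' (0x0d) onto the adjacent pair 0x0b..0x0c, so a single
-// unsigned range check covers both; 0x2028 and 0x2029 fold onto each other
-// the same way. A hypothetical C++ model of the test:
-static bool IsLineTerminator(uint16_t c) {
-uint16_t folded = c ^ 0x01; // 0x0a<->0x0b, 0x0c<->0x0d, 0x2028<->0x2029.
-if (static_cast<uint16_t>(folded - 0x0b) <= 0x01) return true; // \n or \r.
-return static_cast<uint16_t>(folded - 0x2028) <= 0x01; // U+2028 or U+2029.
-}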
-
-
-void RegExpMacroAssemblerARM::Fail() {
- __ mov(r0, Operand(FAILURE));
- __ jmp(&exit_label_);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
- Label return_r0;
- // Finalize code - write the entry point code now we know how many
- // registers we need.
-
- // Entry code:
- __ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL,
- // no code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
- // Push arguments
- // Save callee-save registers.
- // Start new stack frame.
- // Store link register in existing stack-cell.
- // Order here should correspond to order of offset constants in header file.
- RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() |
- r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit();
- RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit();
- __ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit());
- // Set frame pointer in space for it if this is not a direct call
- // from generated code.
- __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
- __ mov(r0, Operand::Zero());
- __ push(r0); // Make room for success counter and initialize it to 0.
- __ push(r0); // Make room for "position - 1" constant (value is irrelevant).
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ sub(r0, sp, r0, SetCC);
- // Handle it if the stack pointer is already below the stack limit.
- __ b(ls, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmp(r0, Operand(num_registers_ * kPointerSize));
- __ b(hs, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(r0, Operand(EXCEPTION));
- __ jmp(&return_r0);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(r0);
- __ cmp(r0, Operand::Zero());
- // If returned value is non-zero, we exit with the returned value as result.
- __ b(ne, &return_r0);
-
- __ bind(&stack_ok);
-
- // Allocate space on stack for registers.
- __ sub(sp, sp, Operand(num_registers_ * kPointerSize));
- // Load string end.
- __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- // Load input start.
- __ ldr(r0, MemOperand(frame_pointer(), kInputStart));
- // Find negative length (offset of start relative to end).
- __ sub(current_input_offset(), r0, end_of_input_address());
- // Set r0 to address of char before start of the input string
- // (effectively string position -1).
- __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
- __ sub(r0, current_input_offset(), Operand(char_size()));
- __ sub(r0, r0, Operand(r1, LSL, (mode_ == UC16) ? 1 : 0));
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
-
- // Initialize code pointer register
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(r1, Operand::Zero());
- __ b(ne, &load_char_start_regexp);
- __ mov(current_character(), Operand('\n'), LeaveCC, eq);
- __ jmp(&start_regexp);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
-
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
- // Fill saved registers with initial value = start offset - 1
- if (num_saved_registers_ > 8) {
- // Address of register 0.
- __ add(r1, frame_pointer(), Operand(kRegisterZero));
- __ mov(r2, Operand(num_saved_registers_));
- Label init_loop;
- __ bind(&init_loop);
- __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
- __ sub(r2, r2, Operand(1), SetCC);
- __ b(ne, &init_loop);
- } else {
- for (int i = 0; i < num_saved_registers_; i++) {
- __ str(r0, register_location(i));
- }
- }
- }
-
- // Initialize backtrack stack pointer.
- __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
-
- __ jmp(&start_label_);
-
- // Exit code:
- if (success_label_.is_linked()) {
- // Save captures when successful.
- __ bind(&success_label_);
- if (num_saved_registers_ > 0) {
- // Copy captures to output.
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ ldr(r0, MemOperand(frame_pointer(), kRegisterOutput));
- __ ldr(r2, MemOperand(frame_pointer(), kStartIndex));
- __ sub(r1, end_of_input_address(), r1);
- // r1 is length of input in bytes.
- if (mode_ == UC16) {
- __ mov(r1, Operand(r1, LSR, 1));
- }
- // r1 is length of input in characters.
- __ add(r1, r1, Operand(r2));
- // r1 is length of string in characters.
-
- ASSERT_EQ(0, num_saved_registers_ % 2);
- // Always an even number of capture registers. This allows us to
- // unroll the loop once to add an operation between a load of a register
- // and the following use of that register.
- for (int i = 0; i < num_saved_registers_; i += 2) {
- __ ldr(r2, register_location(i));
- __ ldr(r3, register_location(i + 1));
- if (i == 0 && global_with_zero_length_check()) {
- // Keep capture start in r4 for the zero-length check later.
- __ mov(r4, r2);
- }
- if (mode_ == UC16) {
- __ add(r2, r1, Operand(r2, ASR, 1));
- __ add(r3, r1, Operand(r3, ASR, 1));
- } else {
- __ add(r2, r1, Operand(r2));
- __ add(r3, r1, Operand(r3));
- }
- __ str(r2, MemOperand(r0, kPointerSize, PostIndex));
- __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
- }
- }
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
- // Increment success counter.
- __ add(r0, r0, Operand(1));
- __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ sub(r1, r1, Operand(num_saved_registers_));
- // Check whether we have enough room for another set of capture results.
- __ cmp(r1, Operand(num_saved_registers_));
- __ b(lt, &return_r0);
-
- __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
- // Advance the location for output.
- __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
- __ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
-
- // Prepare r0 to initialize registers with its value in the next run.
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
-
- if (global_with_zero_length_check()) {
- // Special case for zero-length matches.
- // r4: capture start index
- __ cmp(current_input_offset(), r4);
- // Not a zero-length match, restart.
- __ b(ne, &load_char_start_regexp);
- // Offset from the end is zero if we already reached the end.
- __ cmp(current_input_offset(), Operand::Zero());
- __ b(eq, &exit_label_);
- // Advance current position after a zero-length match.
- __ add(current_input_offset(),
- current_input_offset(),
- Operand((mode_ == UC16) ? 2 : 1));
- }
-
- __ b(&load_char_start_regexp);
- } else {
- __ mov(r0, Operand(SUCCESS));
- }
- }
-
- // Exit and return r0
- __ bind(&exit_label_);
- if (global()) {
- __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- }
-
- __ bind(&return_r0);
- // Skip sp past regexp registers and local variables.
- __ mov(sp, frame_pointer());
- // Restore registers r4..r11 and return (restoring lr to pc).
- __ ldm(ia_w, sp, registers_to_retain | pc.bit());
-
- // Backtrack code (branch target for conditional backtracks).
- if (backtrack_label_.is_linked()) {
- __ bind(&backtrack_label_);
- Backtrack();
- }
-
- Label exit_with_exception;
-
- // Preempt-code
- if (check_preempt_label_.is_linked()) {
- SafeCallTarget(&check_preempt_label_);
-
- CallCheckStackGuardState(r0);
- __ cmp(r0, Operand::Zero());
- // If returning non-zero, we should end execution with the given
- // result as return value.
- __ b(ne, &return_r0);
-
- // String might have moved: Reload end of string from frame.
- __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
- SafeReturn();
- }
-
- // Backtrack stack overflow code.
- if (stack_overflow_label_.is_linked()) {
- SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
- Label grow_failed;
-
- // Call GrowStack(backtrack_stackpointer(), &stack_base)
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, r0);
- __ mov(r0, backtrack_stackpointer());
- __ add(r1, frame_pointer(), Operand(kStackHighEnd));
- __ mov(r2, Operand(ExternalReference::isolate_address()));
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If it returned NULL, we have failed to grow the stack and must exit
- // with a stack-overflow exception.
- __ cmp(r0, Operand::Zero());
- __ b(eq, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ mov(backtrack_stackpointer(), r0);
- // Restore saved registers and continue.
- SafeReturn();
- }
-
- if (exit_with_exception.is_linked()) {
- // If any of the code above needed to exit with an exception.
- __ bind(&exit_with_exception);
- // Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ mov(r0, Operand(EXCEPTION));
- __ jmp(&return_r0);
- }
-
- CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code = FACTORY->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
- return Handle<HeapObject>::cast(code);
-}
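-
-// The capture-copy loop in GetCode converts register values, which are byte
-// offsets negative from the end of the input, into character indices: the
-// length in r1 already includes the start index, and the UC16 case halves
-// the byte offset with an arithmetic shift. A hypothetical C++ model:
-static int32_t CaptureIndex(int32_t byte_offset_from_end, // Negative.
-int32_t length_plus_start_index, bool uc16) {
-int32_t chars = uc16 ? byte_offset_from_end >> 1 // Arithmetic shift (ASR).
-: byte_offset_from_end;
-return length_plus_start_index + chars;
-}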
-
-
-void RegExpMacroAssemblerARM::GoTo(Label* to) {
- BranchOrBacktrack(al, to);
-}
-
-
-void RegExpMacroAssemblerARM::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ ldr(r0, register_location(reg));
- __ cmp(r0, Operand(comparand));
- BranchOrBacktrack(ge, if_ge);
-}
-
-
-void RegExpMacroAssemblerARM::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- __ ldr(r0, register_location(reg));
- __ cmp(r0, Operand(comparand));
- BranchOrBacktrack(lt, if_lt);
-}
-
-
-void RegExpMacroAssemblerARM::IfRegisterEqPos(int reg,
- Label* if_eq) {
- __ ldr(r0, register_location(reg));
- __ cmp(r0, Operand(current_input_offset()));
- BranchOrBacktrack(eq, if_eq);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerARM::Implementation() {
- return kARMImplementation;
-}
-
-
-void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
- ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerARM::PopCurrentPosition() {
- Pop(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerARM::PopRegister(int register_index) {
- Pop(r0);
- __ str(r0, register_location(register_index));
-}
-
-
-static bool is_valid_memory_offset(int value) {
- if (value < 0) value = -value;
- return value < (1<<12);
-}
-
-
-void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
- if (label->is_bound()) {
- int target = label->pos();
- __ mov(r0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
- } else {
- int constant_offset = GetBacktrackConstantPoolEntry();
- masm_->label_at_put(label, constant_offset);
- // Reading pc-relative is based on the address 8 bytes ahead of
- // the current opcode.
- unsigned int offset_of_pc_register_read =
- masm_->pc_offset() + Assembler::kPcLoadDelta;
- int pc_offset_of_constant =
- constant_offset - offset_of_pc_register_read;
- ASSERT(pc_offset_of_constant < 0);
- if (is_valid_memory_offset(pc_offset_of_constant)) {
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ ldr(r0, MemOperand(pc, pc_offset_of_constant));
- } else {
- // Not a 12-bit offset, so it needs to be loaded from the constant
- // pool.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
- __ ldr(r0, MemOperand(pc, r0));
- }
- }
- Push(r0);
- CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerARM::PushCurrentPosition() {
- Push(current_input_offset());
-}
-
-
-void RegExpMacroAssemblerARM::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- __ ldr(r0, register_location(register_index));
- Push(r0);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerARM::ReadCurrentPositionFromRegister(int reg) {
- __ ldr(current_input_offset(), register_location(reg));
-}
-
-
-void RegExpMacroAssemblerARM::ReadStackPointerFromRegister(int reg) {
- __ ldr(backtrack_stackpointer(), register_location(reg));
- __ ldr(r0, MemOperand(frame_pointer(), kStackHighEnd));
- __ add(backtrack_stackpointer(), backtrack_stackpointer(), Operand(r0));
-}
-
-
-void RegExpMacroAssemblerARM::SetCurrentPositionFromEnd(int by) {
- Label after_position;
- __ cmp(current_input_offset(), Operand(-by * char_size()));
- __ b(ge, &after_position);
- __ mov(current_input_offset(), Operand(-by * char_size()));
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&after_position);
-}
-
-
-void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ mov(r0, Operand(to));
- __ str(r0, register_location(register_index));
-}
-
-
-bool RegExpMacroAssemblerARM::Succeed() {
- __ jmp(&success_label_);
- return global();
-}
-
-
-void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- if (cp_offset == 0) {
- __ str(current_input_offset(), register_location(reg));
- } else {
- __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
- __ str(r0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
- for (int reg = reg_from; reg <= reg_to; reg++) {
- __ str(r0, register_location(reg));
- }
-}
-
-
-void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
- __ ldr(r1, MemOperand(frame_pointer(), kStackHighEnd));
- __ sub(r0, backtrack_stackpointer(), r1);
- __ str(r0, register_location(reg));
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
- // RegExp code frame pointer.
- __ mov(r2, frame_pointer());
- // Code* of self.
- __ mov(r1, Operand(masm_->CodeObject()));
- // r0 becomes return address pointer.
- ExternalReference stack_guard_check =
- ExternalReference::re_check_stack_guard_state(masm_->isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
- if (isolate->stack_guard()->IsStackOverflow()) {
- isolate->StackOverflow();
- return EXCEPTION;
- }
-
- // If it is not a real stack overflow, the stack guard was used to
- // interrupt execution for another purpose.
-
- // If this is a direct call from JavaScript retry the RegExp forcing the call
- // through the runtime system. Currently the direct call cannot handle a GC.
- if (frame_entry<int>(re_frame, kDirectCall) == 1) {
- return RETRY;
- }
-
- // Prepare for possible GC.
- HandleScope handles(isolate);
- Handle<Code> code_handle(re_code);
-
- Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
-
- // Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
-
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
- re_code->instruction_start() + re_code->instruction_size());
-
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
-
- if (*code_handle != re_code) { // Return address no longer valid
- int delta = code_handle->address() - re_code->address();
- // Overwrite the return address on the stack.
- *return_address += delta;
- }
-
- if (result->IsException()) {
- return EXCEPTION;
- }
-
- Handle<String> subject_tmp = subject;
- int slice_offset = 0;
-
- // Extract the underlying string and the slice offset.
- if (StringShape(*subject_tmp).IsCons()) {
- subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
- } else if (StringShape(*subject_tmp).IsSliced()) {
- SlicedString* slice = SlicedString::cast(*subject_tmp);
- subject_tmp = Handle<String>(slice->parent());
- slice_offset = slice->offset();
- }
-
- // String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and a UC16 string, the specialized
- // code cannot be used, and we need to restart regexp matching from
- // scratch (including, potentially, compiling a new version of the code).
- return RETRY;
- }
-
- // Otherwise, the content of the string might have moved. It must still
- // be a sequential or external string with the same content.
- // Update the start and end pointers in the stack frame to the current
- // location (whether it has actually moved or not).
- ASSERT(StringShape(*subject_tmp).IsSequential() ||
- StringShape(*subject_tmp).IsExternal());
-
- // The original start address of the characters to match.
- const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
-
- // Find the current start address of the same character at the current string
- // position.
- int start_index = frame_entry<int>(re_frame, kStartIndex);
- const byte* new_address = StringCharacterPosition(*subject_tmp,
- start_index + slice_offset);
-
- if (start_address != new_address) {
- // If there is a difference, update the object pointer and start and end
- // addresses in the RegExp stack frame to match the new value.
- const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
- int byte_length = static_cast<int>(end_address - start_address);
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- frame_entry<const byte*>(re_frame, kInputStart) = new_address;
- frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
- } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
- // Subject string might have been a ConsString that underwent
- // short-circuiting during GC. That will not change start_address but
- // will change pointer inside the subject handle.
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- }
-
- return 0;
-}
-
-
-MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- return MemOperand(frame_pointer(),
- kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerARM::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- __ cmp(current_input_offset(), Operand(-cp_offset * char_size()));
- BranchOrBacktrack(ge, on_outside_input);
-}
-
-
-void RegExpMacroAssemblerARM::BranchOrBacktrack(Condition condition,
- Label* to) {
- if (condition == al) { // Unconditional.
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
- }
- if (to == NULL) {
- __ b(condition, &backtrack_label_);
- return;
- }
- __ b(condition, to);
-}
-
-
-void RegExpMacroAssemblerARM::SafeCall(Label* to, Condition cond) {
- __ bl(to, cond);
-}
-
-
-void RegExpMacroAssemblerARM::SafeReturn() {
- __ pop(lr);
- __ add(pc, lr, Operand(masm_->CodeObject()));
-}
-
-
-void RegExpMacroAssemblerARM::SafeCallTarget(Label* name) {
- __ bind(name);
- __ sub(lr, lr, Operand(masm_->CodeObject()));
- __ push(lr);
-}
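-
-// SafeCallTarget/SafeReturn keep return addresses GC-safe: what is pushed
-// is the offset of the return point relative to the code object, not an
-// absolute address, so a moving collector can relocate the code without
-// patching the stack. A hypothetical C++ model of the round trip:
-static uintptr_t ToSafeOffset(uintptr_t lr, uintptr_t code_object) {
-return lr - code_object; // SafeCallTarget: sub lr, lr, CodeObject; push.
-}
-static uintptr_t FromSafeOffset(uintptr_t offset, uintptr_t code_object) {
-return offset + code_object; // SafeReturn: pop lr; add pc, lr, CodeObject.
-}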
-
-
-void RegExpMacroAssemblerARM::Push(Register source) {
- ASSERT(!source.is(backtrack_stackpointer()));
- __ str(source,
- MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex));
-}
-
-
-void RegExpMacroAssemblerARM::Pop(Register target) {
- ASSERT(!target.is(backtrack_stackpointer()));
- __ ldr(target,
- MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex));
-}
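-
-// The backtrack stack grows downwards: Push pre-decrements the stack
-// pointer (NegPreIndex) and Pop post-increments it (PostIndex). A
-// hypothetical C++ model:
-struct BacktrackStack {
-int32_t* sp; // Points at the current top-of-stack word.
-void Push(int32_t value) { *--sp = value; }
-int32_t Pop() { return *sp++; }
-};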
-
-
-void RegExpMacroAssemblerARM::CheckPreemption() {
- // Check for preemption.
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ cmp(sp, r0);
- SafeCall(&check_preempt_label_, ls);
-}
-
-
-void RegExpMacroAssemblerARM::CheckStackLimit() {
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ cmp(backtrack_stackpointer(), Operand(r0));
- SafeCall(&stack_overflow_label_, ls);
-}
-
-
-void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
- __ CheckConstPool(false, false);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- backtrack_constant_pool_offset_ = masm_->pc_offset();
- for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
- __ emit(0);
- }
-
- backtrack_constant_pool_capacity_ = kBacktrackConstantPoolSize;
-}
-
-
-int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() {
- while (backtrack_constant_pool_capacity_ > 0) {
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- if (masm_->pc_offset() - offset < 2 * KB) {
- return offset;
- }
- }
- Label new_pool_skip;
- __ jmp(&new_pool_skip);
- EmitBacktrackConstantPool();
- __ bind(&new_pool_skip);
- int offset = backtrack_constant_pool_offset_;
- backtrack_constant_pool_offset_ += kPointerSize;
- backtrack_constant_pool_capacity_--;
- return offset;
-}
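-
-// Pool slots are handed out only while they remain addressable by a
-// pc-relative ldr; the 2 * KB guard above stays well inside the 4095-byte
-// hardware limit that is_valid_memory_offset checks. A hypothetical C++
-// restatement of the slot test:
-static bool SlotStillReachable(int pc_offset, int slot_offset) {
-return pc_offset - slot_offset < 2 * 1024; // Conservative ldr range.
-}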
-
-
-void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- // Must pass all arguments in registers. The stub pushes on the stack.
- ASSERT(num_arguments <= 4);
- __ mov(code_pointer(), Operand(function));
- RegExpCEntryStub stub;
- __ CallStub(&stub);
- if (OS::ActivationFrameAlignment() != 0) {
- __ ldr(sp, MemOperand(sp, 0));
- }
- __ mov(code_pointer(), Operand(masm_->CodeObject()));
-}
-
-
-bool RegExpMacroAssemblerARM::CanReadUnaligned() {
- return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
-}
-
-
-void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- Register offset = current_input_offset();
- if (cp_offset != 0) {
- // r4 is not being used to store the capture start index at this point.
- __ add(r4, current_input_offset(), Operand(cp_offset * char_size()));
- offset = r4;
- }
- // The ldr, str, ldrh and strh instructions can do unaligned accesses if
- // the CPU and the operating system running on the target allow it.
- // If unaligned loads/stores are not supported, this function must only
- // be used to load a single character at a time.
- if (!CanReadUnaligned()) {
- ASSERT(characters == 1);
- }
-
- if (mode_ == ASCII) {
- if (characters == 4) {
- __ ldr(current_character(), MemOperand(end_of_input_address(), offset));
- } else if (characters == 2) {
- __ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
- } else {
- ASSERT(characters == 1);
- __ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
- }
- } else {
- ASSERT(mode_ == UC16);
- if (characters == 2) {
- __ ldr(current_character(), MemOperand(end_of_input_address(), offset));
- } else {
- ASSERT(characters == 1);
- __ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
- }
- }
-}
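-
-// In ASCII mode up to four characters are fetched with one word load and in
-// UC16 mode up to two; the bytes land in the register in little-endian
-// order. A hypothetical C++ model of the ASCII case, assuming a
-// little-endian target and that unaligned reads are permitted:
-static uint32_t LoadAsciiChars(const uint8_t* at, int characters) {
-uint32_t result = 0; // characters is 1, 2 or 4.
-for (int i = 0; i < characters; i++) {
-result |= static_cast<uint32_t>(at[i]) << (8 * i); // Little-endian pack.
-}
-return result;
-}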
-
-
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- int stack_alignment = OS::ActivationFrameAlignment();
- if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
- // Stack is already aligned for call, so decrement by alignment
- // to make room for storing the link register.
- __ str(lr, MemOperand(sp, stack_alignment, NegPreIndex));
- __ mov(r0, sp);
- __ Call(r5);
- __ ldr(pc, MemOperand(sp, stack_alignment, PostIndex));
-}
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h b/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h
deleted file mode 100644
index c45669a..0000000
--- a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.h
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-
-#include "arm/assembler-arm.h"
-#include "arm/assembler-arm-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-#ifndef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone);
- virtual ~RegExpMacroAssemblerARM();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(unsigned c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(unsigned c,
- unsigned mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
- virtual bool CanReadUnaligned();
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
- private:
- // Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
-
- // Above the frame pointer - Stored registers and stack passed parameters.
- // Register 4..11.
- static const int kStoredRegisters = kFramePointer;
- // Return address (stored from link register, read into pc on return).
- static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
- static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
- // Stack parameters placed by caller.
- static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-
- // Below the frame pointer.
- // Register parameters stored by setup code.
- static const int kInputEnd = kFramePointer - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
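-
- // Taken together, the constants above describe this frame layout (higher
- // addresses first; a summary sketch, not normative):
- //
- // kIsolate .. kRegisterOutput stack-passed parameters
- // kSecondaryReturnAddress, kReturnAddress saved return-address slots
- // kStoredRegisters r4..r11, saved by entry code
- // kFramePointer <-- fp points here
- // kInputEnd, kInputStart, kStartIndex, kInputString register parameters
- // kSuccessfulCaptures, kInputStartMinusOne local variables
- // kRegisterZero, kRegisterZero - kPointerSize, ... regexp registers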
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- static const int kBacktrackConstantPoolSize = 4;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
- void EmitBacktrackConstantPool();
- int GetBacktrackConstantPoolEntry();
-
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
-
- // The fp-relative location of a regexp register.
- MemOperand register_location(int register_index);
-
- // Register holding the current input position as negative offset from
- // the end of the string.
- inline Register current_input_offset() { return r6; }
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return r7; }
-
- // Register holding address of the end of the input string.
- inline Register end_of_input_address() { return r10; }
-
- // Register holding the frame address. Local variables, parameters and
- // regexp registers are addressed relative to this.
- inline Register frame_pointer() { return fp; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return r8; }
-
- // Register holding pointer to the current code object.
- inline Register code_pointer() { return r5; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to);
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to, Condition cond = al);
- inline void SafeReturn();
- inline void SafeCallTarget(Label* name);
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // and increments it by a word size.
- inline void Pop(Register target);
-
- // Calls a C function and cleans up the frame alignment done by
- // FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
-
- MacroAssembler* masm_;
-
- // Which mode to generate code for (ASCII or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Manage a small pre-allocated pool for writing label targets
- // to for pushing backtrack addresses.
- int backtrack_constant_pool_offset_;
- int backtrack_constant_pool_capacity_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-
-}} // namespace v8::internal
-
-#endif // V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/simulator-arm.cc b/src/3rdparty/v8/src/arm/simulator-arm.cc
deleted file mode 100644
index b7bc839..0000000
--- a/src/3rdparty/v8/src/arm/simulator-arm.cc
+++ /dev/null
@@ -1,3475 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-#include <math.h>
-#include <cstdarg>
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "disasm.h"
-#include "assembler.h"
-#include "codegen.h"
-#include "arm/constants-arm.h"
-#include "arm/simulator-arm.h"
-
-#if defined(USE_SIMULATOR)
-
-// Only build the simulator if not compiling for real ARM hardware.
-namespace v8 {
-namespace internal {
-
-// This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent way through
-// ::v8::internal::OS in the same way as SNPrintF is that the
-// Windows C Run-Time Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
-
-// The ArmDebugger class is used by the simulator while debugging simulated ARM
-// code.
-class ArmDebugger {
- public:
- explicit ArmDebugger(Simulator* sim) : sim_(sim) { }
- ~ArmDebugger();
-
- void Stop(Instruction* instr);
- void Debug();
-
- private:
- static const Instr kBreakpointInstr =
- (al | (7*B25) | (1*B24) | kBreakpoint);
- static const Instr kNopInstr = (al | (13*B21));
-
- Simulator* sim_;
-
- int32_t GetRegisterValue(int regnum);
- double GetRegisterPairDoubleValue(int regnum);
- double GetVFPDoubleRegisterValue(int regnum);
- bool GetValue(const char* desc, int32_t* value);
- bool GetVFPSingleValue(const char* desc, float* value);
- bool GetVFPDoubleValue(const char* desc, double* value);
-
- // Set or delete a breakpoint. Returns true if successful.
- bool SetBreakpoint(Instruction* breakpc);
- bool DeleteBreakpoint(Instruction* breakpc);
-
- // Undo and redo all breakpoints. This is needed to bracket disassembly and
- // execution to skip past breakpoints when run from the debugger.
- void UndoBreakpoints();
- void RedoBreakpoints();
-};
-
-
-ArmDebugger::~ArmDebugger() {
-}
-
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitializeCoverage() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "a+");
- }
-}
-
-
-void ArmDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Retrieve the encoded address, which comes just after this stop.
- char** msg_address =
- reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
- char* msg = *msg_address;
- ASSERT(msg != NULL);
-
- // Update this stop description.
- if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) {
- sim_->watched_stops[code].desc = msg;
- }
-
- if (strlen(msg) > 0) {
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", msg);
- fflush(coverage_log);
- }
- // Overwrite the instruction and address with nops.
- instr->SetInstructionBits(kNopInstr);
- reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
- }
- sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
-}
-
-#else // ndef GENERATED_CODE_COVERAGE
-
-static void InitializeCoverage() {
-}
-
-
-void ArmDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Retrieve the encoded address, which comes just after this stop.
- char* msg = *reinterpret_cast<char**>(sim_->get_pc()
- + Instruction::kInstrSize);
- // Update this stop description.
- if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) {
- sim_->watched_stops[code].desc = msg;
- }
- // Print the stop message and code if it is not the default code.
- if (code != kMaxStopCode) {
- PrintF("Simulator hit stop %u: %s\n", code, msg);
- } else {
- PrintF("Simulator hit %s\n", msg);
- }
- sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
- Debug();
-}
-#endif
-
-
-int32_t ArmDebugger::GetRegisterValue(int regnum) {
- if (regnum == kPCRegister) {
- return sim_->get_pc();
- } else {
- return sim_->get_register(regnum);
- }
-}
-
-
-double ArmDebugger::GetRegisterPairDoubleValue(int regnum) {
- return sim_->get_double_from_register_pair(regnum);
-}
-
-
-double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) {
- return sim_->get_double_from_d_register(regnum);
-}
-
-
-bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
- int regnum = Registers::Number(desc);
- if (regnum != kNoRegister) {
- *value = GetRegisterValue(regnum);
- return true;
- } else {
- if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
- } else {
- return SScanF(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
- }
- }
- return false;
-}
-
-
-bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
- bool is_double;
- int regnum = VFPRegisters::Number(desc, &is_double);
- if (regnum != kNoRegister && !is_double) {
- *value = sim_->get_float_from_s_register(regnum);
- return true;
- }
- return false;
-}
-
-
-bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
- bool is_double;
- int regnum = VFPRegisters::Number(desc, &is_double);
- if (regnum != kNoRegister && is_double) {
- *value = sim_->get_double_from_d_register(regnum);
- return true;
- }
- return false;
-}
-
-
-bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
- // Check if a breakpoint can be set. If not return without any side-effects.
- if (sim_->break_pc_ != NULL) {
- return false;
- }
-
- // Set the breakpoint.
- sim_->break_pc_ = breakpc;
- sim_->break_instr_ = breakpc->InstructionBits();
- // Not setting the breakpoint instruction in the code itself. It will be set
- // when the debugger shell continues.
- return true;
-}
-
-
-bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
-
- sim_->break_pc_ = NULL;
- sim_->break_instr_ = 0;
- return true;
-}
-
-
-void ArmDebugger::UndoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
- }
-}
-
-
-void ArmDebugger::RedoBreakpoints() {
- if (sim_->break_pc_ != NULL) {
- sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
- }
-}
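-
-// The debugger supports a single breakpoint at a time: SetBreakpoint only
-// records the slot and its original bits, while Redo/UndoBreakpoints swap
-// the breakpoint instruction in and out around shell interaction. A
-// hypothetical C++ model of that swap:
-struct BreakpointSlot {
-uint32_t* pc; // Patched instruction slot (NULL if unset).
-uint32_t saved_bits; // Original instruction bits.
-void Redo(uint32_t bkpt) { if (pc != NULL) *pc = bkpt; } // Re-arm.
-void Undo() { if (pc != NULL) *pc = saved_bits; } // Restore original.
-};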
-
-
-void ArmDebugger::Debug() {
- intptr_t last_pc = -1;
- bool done = false;
-
-#define COMMAND_SIZE 63
-#define ARG_SIZE 255
-
-#define STR(a) #a
-#define XSTR(a) STR(a)
-
- char cmd[COMMAND_SIZE + 1];
- char arg1[ARG_SIZE + 1];
- char arg2[ARG_SIZE + 1];
- char* argv[3] = { cmd, arg1, arg2 };
-
- // Make sure to have a proper terminating character if the limit is reached.
- cmd[COMMAND_SIZE] = 0;
- arg1[ARG_SIZE] = 0;
- arg2[ARG_SIZE] = 0;
-
- // Undo all set breakpoints while running in the debugger shell. This will
- // make them invisible to all commands.
- UndoBreakpoints();
-
- while (!done && !sim_->has_bad_pc()) {
- if (last_pc != sim_->get_pc()) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(sim_->get_pc()));
- PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
- last_pc = sim_->get_pc();
- }
- char* line = ReadLine("sim> ");
- if (line == NULL) {
- break;
- } else {
- char* last_input = sim_->last_debugger_input();
- if (strcmp(line, "\n") == 0 && last_input != NULL) {
- line = last_input;
- } else {
- // Ownership is transferred to sim_.
- sim_->set_last_debugger_input(line);
- }
- // Use sscanf to parse the individual parts of the command line. At the
- // moment no command expects more than two parameters.
- int argc = SScanF(line,
- "%" XSTR(COMMAND_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s",
- cmd, arg1, arg2);
- if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
- } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
- // Execute the one instruction we broke at with breakpoints disabled.
- sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
- // Leave the debugger shell.
- done = true;
- } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
- if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
- int32_t value;
- float svalue;
- double dvalue;
- if (strcmp(arg1, "all") == 0) {
- for (int i = 0; i < kNumRegisters; i++) {
- value = GetRegisterValue(i);
- PrintF("%3s: 0x%08x %10d", Registers::Name(i), value, value);
- if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
- i < 8 &&
- (i % 2) == 0) {
- dvalue = GetRegisterPairDoubleValue(i);
- PrintF(" (%f)\n", dvalue);
- } else {
- PrintF("\n");
- }
- }
- for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
- dvalue = GetVFPDoubleRegisterValue(i);
- uint64_t as_words = BitCast<uint64_t>(dvalue);
- PrintF("%3s: %f 0x%08x %08x\n",
- VFPRegisters::Name(i, true),
- dvalue,
- static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
- }
- } else {
- if (GetValue(arg1, &value)) {
- PrintF("%s: 0x%08x %d \n", arg1, value, value);
- } else if (GetVFPSingleValue(arg1, &svalue)) {
- uint32_t as_word = BitCast<uint32_t>(svalue);
- PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
- } else if (GetVFPDoubleValue(arg1, &dvalue)) {
- uint64_t as_words = BitCast<uint64_t>(dvalue);
- PrintF("%s: %f 0x%08x %08x\n",
- arg1,
- dvalue,
- static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xffffffff));
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- }
- } else {
- PrintF("print <register>\n");
- }
- } else if ((strcmp(cmd, "po") == 0)
- || (strcmp(cmd, "printobject") == 0)) {
- if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- Object* obj = reinterpret_cast<Object*>(value);
- PrintF("%s: \n", arg1);
-#ifdef DEBUG
- obj->PrintLn();
-#else
- obj->ShortPrint();
- PrintF("\n");
-#endif
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("printobject <value>\n");
- }
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
- int32_t* cur = NULL;
- int32_t* end = NULL;
- int next_arg = 1;
-
- if (strcmp(cmd, "stack") == 0) {
- cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
- } else { // "mem"
- int32_t value;
- if (!GetValue(arg1, &value)) {
- PrintF("%s unrecognized\n", arg1);
- continue;
- }
- cur = reinterpret_cast<int32_t*>(value);
- next_arg++;
- }
-
- int32_t words;
- if (argc == next_arg) {
- words = 10;
- } else {
- if (!GetValue(argv[next_arg], &words)) {
- words = 10;
- }
- }
- end = cur + words;
-
- while (cur < end) {
- PrintF(" 0x%08x: 0x%08x %10d",
- reinterpret_cast<intptr_t>(cur), *cur, *cur);
- HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
- int value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
- if (current_heap->Contains(obj) || ((value & 1) == 0)) {
- PrintF(" (");
- if ((value & 1) == 0) {
- PrintF("smi %d", value / 2);
- } else {
- obj->ShortPrint();
- }
- PrintF(")");
- }
- PrintF("\n");
- cur++;
- }
- } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
-
- byte* prev = NULL;
- byte* cur = NULL;
- byte* end = NULL;
-
- if (argc == 1) {
- cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstrSize);
- } else if (argc == 2) {
- int regnum = Registers::Number(arg1);
- if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
- // The argument is an address or a register name.
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte*>(value);
- // Disassemble 10 instructions at <arg1>.
- end = cur + (10 * Instruction::kInstrSize);
- }
- } else {
- // The argument is the number of instructions.
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte*>(sim_->get_pc());
- // Disassemble <arg1> instructions.
- end = cur + (value * Instruction::kInstrSize);
- }
- }
- } else {
- int32_t value1;
- int32_t value2;
- if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instruction::kInstrSize);
- }
- }
-
- while (cur < end) {
- prev = cur;
- cur += dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n",
- reinterpret_cast<intptr_t>(prev), buffer.start());
- }
- } else if (strcmp(cmd, "gdb") == 0) {
- PrintF("relinquishing control to gdb\n");
- v8::internal::OS::DebugBreak();
- PrintF("regaining control from gdb\n");
- } else if (strcmp(cmd, "break") == 0) {
- if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
- PrintF("setting breakpoint failed\n");
- }
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
- } else {
- PrintF("break <address>\n");
- }
- } else if (strcmp(cmd, "del") == 0) {
- if (!DeleteBreakpoint(NULL)) {
- PrintF("deleting breakpoint failed\n");
- }
- } else if (strcmp(cmd, "flags") == 0) {
- PrintF("N flag: %d; ", sim_->n_flag_);
- PrintF("Z flag: %d; ", sim_->z_flag_);
- PrintF("C flag: %d; ", sim_->c_flag_);
- PrintF("V flag: %d\n", sim_->v_flag_);
- PrintF("INVALID OP flag: %d; ", sim_->inv_op_vfp_flag_);
- PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
- PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
- PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
- PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
- } else if (strcmp(cmd, "stop") == 0) {
- int32_t value;
- intptr_t stop_pc = sim_->get_pc() - 2 * Instruction::kInstrSize;
- Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
- Instruction* msg_address =
- reinterpret_cast<Instruction*>(stop_pc + Instruction::kInstrSize);
- if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
- // Remove the current stop.
- if (sim_->isStopInstruction(stop_instr)) {
- stop_instr->SetInstructionBits(kNopInstr);
- msg_address->SetInstructionBits(kNopInstr);
- } else {
- PrintF("Not at debugger stop.\n");
- }
- } else if (argc == 3) {
- // Print information about all/the specified breakpoint(s).
- if (strcmp(arg1, "info") == 0) {
- if (strcmp(arg2, "all") == 0) {
- PrintF("Stop information:\n");
- for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
- sim_->PrintStopInfo(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->PrintStopInfo(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- } else if (strcmp(arg1, "enable") == 0) {
- // Enable all/the specified breakpoint(s).
- if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
- sim_->EnableStop(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->EnableStop(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- } else if (strcmp(arg1, "disable") == 0) {
- // Disable all/the specified breakpoint(s).
- if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
- sim_->DisableStop(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->DisableStop(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- }
- } else {
- PrintF("Wrong usage. Use help command for more information.\n");
- }
- } else if ((strcmp(cmd, "t") == 0) || strcmp(cmd, "trace") == 0) {
- ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
- PrintF("Trace of executed instructions is %s\n",
- ::v8::internal::FLAG_trace_sim ? "on" : "off");
- } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
- PrintF("cont\n");
- PrintF(" continue execution (alias 'c')\n");
- PrintF("stepi\n");
- PrintF(" step one instruction (alias 'si')\n");
- PrintF("print <register>\n");
- PrintF(" print register content (alias 'p')\n");
- PrintF(" use register name 'all' to print all registers\n");
- PrintF(" add argument 'fp' to print register pair double values\n");
- PrintF("printobject <register>\n");
- PrintF(" print an object from a register (alias 'po')\n");
- PrintF("flags\n");
- PrintF(" print flags\n");
- PrintF("stack [<words>]\n");
- PrintF(" dump stack content, default dump 10 words)\n");
- PrintF("mem <address> [<words>]\n");
- PrintF(" dump memory content, default dump 10 words)\n");
- PrintF("disasm [<instructions>]\n");
- PrintF("disasm [<address/register>]\n");
- PrintF("disasm [[<address/register>] <instructions>]\n");
- PrintF(" disassemble code, default is 10 instructions\n");
- PrintF(" from pc (alias 'di')\n");
- PrintF("gdb\n");
- PrintF(" enter gdb\n");
- PrintF("break <address>\n");
- PrintF(" set a break point on the address\n");
- PrintF("del\n");
- PrintF(" delete the breakpoint\n");
- PrintF("trace (alias 't')\n");
- PrintF(" toogle the tracing of all executed statements\n");
- PrintF("stop feature:\n");
- PrintF(" Description:\n");
- PrintF(" Stops are debug instructions inserted by\n");
- PrintF(" the Assembler::stop() function.\n");
- PrintF(" When hitting a stop, the Simulator will\n");
- PrintF(" stop and and give control to the ArmDebugger.\n");
- PrintF(" The first %d stop codes are watched:\n",
- Simulator::kNumOfWatchedStops);
- PrintF(" - They can be enabled / disabled: the Simulator\n");
- PrintF(" will / won't stop when hitting them.\n");
- PrintF(" - The Simulator keeps track of how many times they \n");
- PrintF(" are met. (See the info command.) Going over a\n");
- PrintF(" disabled stop still increases its counter. \n");
- PrintF(" Commands:\n");
- PrintF(" stop info all/<code> : print infos about number <code>\n");
- PrintF(" or all stop(s).\n");
- PrintF(" stop enable/disable all/<code> : enables / disables\n");
- PrintF(" all or number <code> stop(s)\n");
- PrintF(" stop unstop\n");
- PrintF(" ignore the stop instruction at the current location\n");
- PrintF(" from now on\n");
- } else {
- PrintF("Unknown command: %s\n", cmd);
- }
- }
- }
-
- // Add all the breakpoints back to stop execution and enter the debugger
- // shell when hit.
- RedoBreakpoints();
-
-#undef COMMAND_SIZE
-#undef ARG_SIZE
-
-#undef STR
-#undef XSTR
-}
-
-
-static bool ICacheMatch(void* one, void* two) {
- ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
- ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
- return one == two;
-}
-
-
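-// Code addresses are at least word aligned, so the two low bits of the key
-// are always zero; shift them out before hashing.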
-static uint32_t ICacheHash(void* key) {
- return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
-}
-
-
-static bool AllOnOnePage(uintptr_t start, int size) {
- intptr_t start_page = (start & ~CachePage::kPageMask);
- intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
- return start_page == end_page;
-}
-
-
-void Simulator::set_last_debugger_input(char* input) {
- DeleteArray(last_debugger_input_);
- last_debugger_input_ = input;
-}
-
-
-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
- void* start_addr,
- size_t size) {
- intptr_t start = reinterpret_cast<intptr_t>(start_addr);
- int intra_line = (start & CachePage::kLineMask);
- start -= intra_line;
- size += intra_line;
- size = ((size - 1) | CachePage::kLineMask) + 1;
- int offset = (start & CachePage::kPageMask);
- while (!AllOnOnePage(start, size - 1)) {
- int bytes_to_flush = CachePage::kPageSize - offset;
- FlushOnePage(i_cache, start, bytes_to_flush);
- start += bytes_to_flush;
- size -= bytes_to_flush;
- ASSERT_EQ(0, start & CachePage::kPageMask);
- offset = 0;
- }
- if (size != 0) {
- FlushOnePage(i_cache, start, size);
- }
-}
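-
-// A standalone sketch of the alignment arithmetic above, using an assumed
-// 32-byte cache line (the real constant is CachePage::kLineMask). The range
-// is widened so that both endpoints land on cache-line boundaries before the
-// page-by-page flush.
-static void SketchAlignToLines(intptr_t* start, size_t* size) {
-  const intptr_t kAssumedLineMask = 32 - 1;  // Assumed line size of 32 bytes.
-  intptr_t intra_line = *start & kAssumedLineMask;
-  *start -= intra_line;                          // Align start downwards.
-  *size += intra_line;
-  *size = ((*size - 1) | kAssumedLineMask) + 1;  // Round size up to lines.
-}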
-
-
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
- v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
- ICacheHash(page),
- true);
- if (entry->value == NULL) {
- CachePage* new_page = new CachePage();
- entry->value = new_page;
- }
- return reinterpret_cast<CachePage*>(entry->value);
-}
-
-
-// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
- intptr_t start,
- int size) {
- ASSERT(size <= CachePage::kPageSize);
- ASSERT(AllOnOnePage(start, size - 1));
- ASSERT((start & CachePage::kLineMask) == 0);
- ASSERT((size & CachePage::kLineMask) == 0);
- void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
- int offset = (start & CachePage::kPageMask);
- CachePage* cache_page = GetCachePage(i_cache, page);
- char* valid_bytemap = cache_page->ValidityByte(offset);
- memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
-}
-
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
- Instruction* instr) {
- intptr_t address = reinterpret_cast<intptr_t>(instr);
- void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
- void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
- int offset = (address & CachePage::kPageMask);
- CachePage* cache_page = GetCachePage(i_cache, page);
- char* cache_valid_byte = cache_page->ValidityByte(offset);
- bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
- char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
- if (cache_hit) {
- // Check that the data in memory matches the contents of the I-cache.
- CHECK(memcmp(reinterpret_cast<void*>(instr),
- cache_page->CachedData(offset),
- Instruction::kInstrSize) == 0);
- } else {
- // Cache miss. Load memory into the cache.
- memcpy(cached_line, line, CachePage::kLineLength);
- *cache_valid_byte = CachePage::LINE_VALID;
- }
-}
-
-
-void Simulator::Initialize(Isolate* isolate) {
- if (isolate->simulator_initialized()) return;
- isolate->set_simulator_initialized(true);
- ::v8::internal::ExternalReference::set_redirector(isolate,
- &RedirectExternalReference);
-}
-
-
-Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
- i_cache_ = isolate_->simulator_i_cache();
- if (i_cache_ == NULL) {
- i_cache_ = new v8::internal::HashMap(&ICacheMatch);
- isolate_->set_simulator_i_cache(i_cache_);
- }
- Initialize(isolate);
-  // Set up simulator support first. Some of this information is needed to
-  // set up the architecture state.
-  size_t stack_size = 1 * 1024 * 1024;  // Allocate 1MB for the stack.
- stack_ = reinterpret_cast<char*>(malloc(stack_size));
- pc_modified_ = false;
- icount_ = 0;
- break_pc_ = NULL;
- break_instr_ = 0;
-
- // Set up architecture state.
- // All registers are initialized to zero to start with.
- for (int i = 0; i < num_registers; i++) {
- registers_[i] = 0;
- }
- n_flag_ = false;
- z_flag_ = false;
- c_flag_ = false;
- v_flag_ = false;
-
- // Initializing VFP registers.
- // All registers are initialized to zero to start with
- // even though s_registers_ & d_registers_ share the same
- // physical registers in the target.
- for (int i = 0; i < num_d_registers * 2; i++) {
- vfp_registers_[i] = 0;
- }
- n_flag_FPSCR_ = false;
- z_flag_FPSCR_ = false;
- c_flag_FPSCR_ = false;
- v_flag_FPSCR_ = false;
- FPSCR_rounding_mode_ = RZ;
-
- inv_op_vfp_flag_ = false;
- div_zero_vfp_flag_ = false;
- overflow_vfp_flag_ = false;
- underflow_vfp_flag_ = false;
- inexact_vfp_flag_ = false;
-
-  // The sp is initialized to point to the bottom (highest address) of the
-  // allocated stack area. A 64-byte margin is left above it so that
-  // potential stack underflows stay inside the allocation.
- registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
- // The lr and pc are initialized to a known bad value that will cause an
- // access violation if the simulator ever tries to execute it.
- registers_[pc] = bad_lr;
- registers_[lr] = bad_lr;
- InitializeCoverage();
-
- last_debugger_input_ = NULL;
-}
-
-
-// When the generated code calls an external reference we need to catch that in
-// the simulator. The external reference will be a function compiled for the
-// host architecture. We need to call that function instead of trying to
-// execute it with the simulator. We do that by redirecting the external
-// reference to a svc (Supervisor Call) instruction that is handled by
-// the simulator. We write the original destination of the jump just at a known
-// offset from the svc instruction so the simulator knows what to call.
-class Redirection {
- public:
- Redirection(void* external_function, ExternalReference::Type type)
- : external_function_(external_function),
- swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
- type_(type),
- next_(NULL) {
- Isolate* isolate = Isolate::Current();
- next_ = isolate->simulator_redirection();
- Simulator::current(isolate)->
- FlushICache(isolate->simulator_i_cache(),
- reinterpret_cast<void*>(&swi_instruction_),
- Instruction::kInstrSize);
- isolate->set_simulator_redirection(this);
- }
-
- void* address_of_swi_instruction() {
- return reinterpret_cast<void*>(&swi_instruction_);
- }
-
- void* external_function() { return external_function_; }
- ExternalReference::Type type() { return type_; }
-
- static Redirection* Get(void* external_function,
- ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
- Redirection* current = isolate->simulator_redirection();
- for (; current != NULL; current = current->next_) {
- if (current->external_function_ == external_function) return current;
- }
- return new Redirection(external_function, type);
- }
-
- static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
- char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
- char* addr_of_redirection =
- addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
- return reinterpret_cast<Redirection*>(addr_of_redirection);
- }
-
- private:
- void* external_function_;
- uint32_t swi_instruction_;
- ExternalReference::Type type_;
- Redirection* next_;
-};
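-
-// The class above is a small trampoline table: each entry embeds a marker
-// instruction word and recovers the entry from that word's address. A
-// standalone sketch of the back-pointer trick (the Trampoline type is
-// illustrative, not part of the simulator):
-#include <cstddef>
-
-struct Trampoline {
-  uint32_t marker;  // Stands in for swi_instruction_.
-  void* target;     // Stands in for external_function_.
-
-  static Trampoline* FromMarker(uint32_t* marker_address) {
-    // Recover the enclosing object from the address of its embedded field,
-    // mirroring Redirection::FromSwiInstruction() above.
-    char* p = reinterpret_cast<char*>(marker_address);
-    return reinterpret_cast<Trampoline*>(p - offsetof(Trampoline, marker));
-  }
-};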
-
-
-void* Simulator::RedirectExternalReference(void* external_function,
- ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
- return redirection->address_of_swi_instruction();
-}
-
-
-// Get the active Simulator for the current thread.
-Simulator* Simulator::current(Isolate* isolate) {
- v8::internal::Isolate::PerIsolateThreadData* isolate_data =
- isolate->FindOrAllocatePerThreadDataForThisThread();
- ASSERT(isolate_data != NULL);
-
- Simulator* sim = isolate_data->simulator();
- if (sim == NULL) {
- // TODO(146): delete the simulator object when a thread/isolate goes away.
- sim = new Simulator(isolate);
- isolate_data->set_simulator(sim);
- }
- return sim;
-}
-
-
-// Sets the register in the architecture state. It will also deal with updating
-// Simulator internal state for special registers such as PC.
-void Simulator::set_register(int reg, int32_t value) {
- ASSERT((reg >= 0) && (reg < num_registers));
- if (reg == pc) {
- pc_modified_ = true;
- }
- registers_[reg] = value;
-}
-
-
-// Get the register from the architecture state. This function does handle
-// the special case of accessing the PC register.
-int32_t Simulator::get_register(int reg) const {
- ASSERT((reg >= 0) && (reg < num_registers));
-  // Redundant range check added to work around a code generation bug in GCC.
-  // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
-  if (reg >= num_registers) return 0;
-  // End of GCC workaround.
- return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
-}
-
-
-double Simulator::get_double_from_register_pair(int reg) {
- ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
-
- double dm_val = 0.0;
-  // Read the bits from the integer registers_[] array into the double
-  // precision floating point value and return it.
-  char buffer[2 * sizeof(registers_[0])];
-  memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
-  memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
-  return dm_val;
-}
-
-
-void Simulator::set_dw_register(int dreg, const int* dbl) {
- ASSERT((dreg >= 0) && (dreg < num_d_registers));
- registers_[dreg] = dbl[0];
- registers_[dreg + 1] = dbl[1];
-}
-
-
-// Raw access to the PC register.
-void Simulator::set_pc(int32_t value) {
- pc_modified_ = true;
- registers_[pc] = value;
-}
-
-
-bool Simulator::has_bad_pc() const {
- return ((registers_[pc] == bad_lr) || (registers_[pc] == end_sim_pc));
-}
-
-
-// Raw access to the PC register without the special adjustment when reading.
-int32_t Simulator::get_pc() const {
- return registers_[pc];
-}
-
-
-// Getting from and setting into VFP registers.
-void Simulator::set_s_register(int sreg, unsigned int value) {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- vfp_registers_[sreg] = value;
-}
-
-
-unsigned int Simulator::get_s_register(int sreg) const {
- ASSERT((sreg >= 0) && (sreg < num_s_registers));
- return vfp_registers_[sreg];
-}
-
-
-template<class InputType, int register_size>
-void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
- ASSERT(reg_index >= 0);
- if (register_size == 1) ASSERT(reg_index < num_s_registers);
- if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
-
- char buffer[register_size * sizeof(vfp_registers_[0])];
- memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
- memcpy(&vfp_registers_[reg_index * register_size], buffer,
- register_size * sizeof(vfp_registers_[0]));
-}
-
-
-template<class ReturnType, int register_size>
-ReturnType Simulator::GetFromVFPRegister(int reg_index) {
- ASSERT(reg_index >= 0);
- if (register_size == 1) ASSERT(reg_index < num_s_registers);
- if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
-
- ReturnType value = 0;
- char buffer[register_size * sizeof(vfp_registers_[0])];
- memcpy(buffer, &vfp_registers_[register_size * reg_index],
- register_size * sizeof(vfp_registers_[0]));
- memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
- return value;
-}
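-
-// The memcpy pairs above are the portable way to reinterpret bits without
-// violating strict-aliasing rules. A standalone sketch of the same trick
-// (DoubleBits is illustrative and unused by the simulator):
-#include <cstring>
-
-static inline uint64_t DoubleBits(double value) {
-  uint64_t bits;
-  memcpy(&bits, &value, sizeof(bits));  // Copies the bits; no value conversion.
-  return bits;
-}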
-
-
-// For use in calls that take two double values, constructed either
-// from r0-r3 or d0 and d1.
-void Simulator::GetFpArgs(double* x, double* y) {
-  if (use_eabi_hardfloat()) {
-    // With hardfloat the doubles arrive in d0 and d1; copy their bits.
-    // Assigning the raw vfp_registers_ words would integer-convert them.
-    *x = get_double_from_d_register(0);
-    *y = get_double_from_d_register(1);
-  } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- // Registers 2 and 3 -> y.
- memcpy(buffer, registers_ + 2, sizeof(*y));
- memcpy(y, buffer, sizeof(*y));
- }
-}
-
-// For use in calls that take one double value, constructed either
-// from r0 and r1 or d0.
-void Simulator::GetFpArgs(double* x) {
-  if (use_eabi_hardfloat()) {
-    // With hardfloat the double arrives in d0; copy its bits rather than
-    // integer-converting the raw vfp_registers_ word.
-    *x = get_double_from_d_register(0);
-  } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- }
-}
-
-
-// For use in calls that take one double value constructed either
-// from r0 and r1 or d0 and one integer value.
-void Simulator::GetFpArgs(double* x, int32_t* y) {
-  if (use_eabi_hardfloat()) {
-    *x = get_double_from_d_register(0);
-    // The integer argument is passed in r0, matching the
-    // BUILTIN_FP_INT_CALL handling in SoftwareInterrupt below.
-    *y = registers_[0];
-  } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- // Register 2 -> y.
- memcpy(buffer, registers_ + 2, sizeof(*y));
- memcpy(y, buffer, sizeof(*y));
- }
-}
-
-
-// The return value is either in r0/r1 or d0.
-void Simulator::SetFpResult(const double& result) {
- if (use_eabi_hardfloat()) {
- char buffer[2 * sizeof(vfp_registers_[0])];
- memcpy(buffer, &result, sizeof(buffer));
- // Copy result to d0.
- memcpy(vfp_registers_, buffer, sizeof(buffer));
- } else {
- char buffer[2 * sizeof(registers_[0])];
- memcpy(buffer, &result, sizeof(buffer));
- // Copy result to r0 and r1.
- memcpy(registers_, buffer, sizeof(buffer));
- }
-}
-
-
-void Simulator::TrashCallerSaveRegisters() {
- // We don't trash the registers with the return value.
- registers_[2] = 0x50Bad4U;
- registers_[3] = 0x50Bad4U;
- registers_[12] = 0x50Bad4U;
-}
-
-// Some operating systems allow unaligned access on ARMv7 targets. We assume
-// that unaligned accesses are not allowed unless the v8 build system defines
-// the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-// The following describes the behavior of ARM CPUs that don't support
-// unaligned access: some raise an interrupt on detecting an unaligned access,
-// while others rotate the loaded value. For now we simply disallow unaligned
-// reads. Note that simulator runs have the runtime
-// system running directly on the host system and only generated code is
-// executed in the simulator. Since the host is typically IA32 we will not
-// get the correct ARM-like behaviour on unaligned accesses for those ARM
-// targets that don't support unaligned loads and stores.
-
-
-int Simulator::ReadW(int32_t addr, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
- } else {
- PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- return 0;
- }
-}
-
-
-void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
- } else {
- PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- }
-}
-
-
-uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
- } else {
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08"
- V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- return 0;
- }
-}
-
-
-int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
- } else {
- PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
- }
-}
-
-
-void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- } else {
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08"
- V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- }
-}
-
-
-void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
- } else {
- PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- }
-}
-
-
-uint8_t Simulator::ReadBU(int32_t addr) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- return *ptr;
-}
-
-
-int8_t Simulator::ReadB(int32_t addr) {
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- return *ptr;
-}
-
-
-void Simulator::WriteB(int32_t addr, uint8_t value) {
- uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
- *ptr = value;
-}
-
-
-void Simulator::WriteB(int32_t addr, int8_t value) {
- int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- *ptr = value;
-}
-
-
-int32_t* Simulator::ReadDW(int32_t addr) {
- if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return ptr;
- } else {
- PrintF("Unaligned read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
- }
-}
-
-
-void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
- if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- *ptr++ = value1;
- *ptr = value2;
- } else {
- PrintF("Unaligned write at 0x%08x\n", addr);
- UNIMPLEMENTED();
- }
-}
-
-
-// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit() const {
- // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
- // pushing values.
- return reinterpret_cast<uintptr_t>(stack_) + 1024;
-}
-
-
-// Unsupported instructions use Format to print an error and stop execution.
-void Simulator::Format(Instruction* instr, const char* format) {
- PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
- reinterpret_cast<intptr_t>(instr), format);
- UNIMPLEMENTED();
-}
-
-
-// Checks if the current instruction should be executed based on its
-// condition bits.
-bool Simulator::ConditionallyExecute(Instruction* instr) {
- switch (instr->ConditionField()) {
- case eq: return z_flag_;
- case ne: return !z_flag_;
- case cs: return c_flag_;
- case cc: return !c_flag_;
- case mi: return n_flag_;
- case pl: return !n_flag_;
- case vs: return v_flag_;
- case vc: return !v_flag_;
- case hi: return c_flag_ && !z_flag_;
- case ls: return !c_flag_ || z_flag_;
- case ge: return n_flag_ == v_flag_;
- case lt: return n_flag_ != v_flag_;
- case gt: return !z_flag_ && (n_flag_ == v_flag_);
- case le: return z_flag_ || (n_flag_ != v_flag_);
- case al: return true;
- default: UNREACHABLE();
- }
- return false;
-}
-
-
-// Calculate and set the Negative and Zero flags.
-void Simulator::SetNZFlags(int32_t val) {
- n_flag_ = (val < 0);
- z_flag_ = (val == 0);
-}
-
-
-// Set the Carry flag.
-void Simulator::SetCFlag(bool val) {
- c_flag_ = val;
-}
-
-
-// Set the oVerflow flag.
-void Simulator::SetVFlag(bool val) {
- v_flag_ = val;
-}
-
-
-// Calculate C flag value for additions.
-bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
- uint32_t uleft = static_cast<uint32_t>(left);
- uint32_t uright = static_cast<uint32_t>(right);
- uint32_t urest = 0xffffffffU - uleft;
-
- return (uright > urest) ||
- (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
-}
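-
-// A worked check of the rule above (AddCarries is illustrative): adding b
-// carries out of 32 bits exactly when b exceeds what remains below 2^32.
-// For example a = 0xffffffff, b = 1 carries; a = 0xfffffffe, b = 1 does not.
-static inline bool AddCarries(uint32_t a, uint32_t b) {
-  return b > 0xffffffffu - a;  // The same test as CarryFrom(), no carry-in.
-}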
-
-
-// Calculate C flag value for subtractions.
-bool Simulator::BorrowFrom(int32_t left, int32_t right) {
- uint32_t uleft = static_cast<uint32_t>(left);
- uint32_t uright = static_cast<uint32_t>(right);
-
- return (uright > uleft);
-}
-
-
-// Calculate V flag value for additions and subtractions.
-bool Simulator::OverflowFrom(int32_t alu_out,
- int32_t left, int32_t right, bool addition) {
- bool overflow;
- if (addition) {
- // operands have the same sign
- overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
- // and operands and result have different sign
- && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
- } else {
- // operands have different signs
- overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
- // and first operand and result have different signs
- && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
- }
- return overflow;
-}
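-
-// Worked example of the rule above: 0x7fffffff + 1 overflows because both
-// operands are non-negative while the result is negative. The illustrative
-// helper below adds in unsigned arithmetic so the wraparound is well defined:
-static inline bool AddOverflows(int32_t a, int32_t b) {
-  int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(a) +
-                                     static_cast<uint32_t>(b));
-  return ((a >= 0) == (b >= 0)) && ((a >= 0) != (sum >= 0));
-}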
-
-
-// Support for VFP comparisons.
-void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
- if (isnan(val1) || isnan(val2)) {
- n_flag_FPSCR_ = false;
- z_flag_FPSCR_ = false;
- c_flag_FPSCR_ = true;
- v_flag_FPSCR_ = true;
- // All non-NaN cases.
- } else if (val1 == val2) {
- n_flag_FPSCR_ = false;
- z_flag_FPSCR_ = true;
- c_flag_FPSCR_ = true;
- v_flag_FPSCR_ = false;
- } else if (val1 < val2) {
- n_flag_FPSCR_ = true;
- z_flag_FPSCR_ = false;
- c_flag_FPSCR_ = false;
- v_flag_FPSCR_ = false;
- } else {
- // Case when (val1 > val2).
- n_flag_FPSCR_ = false;
- z_flag_FPSCR_ = false;
- c_flag_FPSCR_ = true;
- v_flag_FPSCR_ = false;
- }
-}
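-
-// Summary of the VFP comparison encoding implemented above:
-//   unordered (NaN): N=0 Z=0 C=1 V=1
-//   equal:           N=0 Z=1 C=1 V=0
-//   less than:       N=1 Z=0 C=0 V=0
-//   greater than:    N=0 Z=0 C=1 V=0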
-
-
-void Simulator::Copy_FPSCR_to_APSR() {
- n_flag_ = n_flag_FPSCR_;
- z_flag_ = z_flag_FPSCR_;
- c_flag_ = c_flag_FPSCR_;
- v_flag_ = v_flag_FPSCR_;
-}
-
-
-// Addressing Mode 1 - Data-processing operands:
-// Get the value based on the shifter_operand with register.
-int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
- ShiftOp shift = instr->ShiftField();
- int shift_amount = instr->ShiftAmountValue();
- int32_t result = get_register(instr->RmValue());
- if (instr->Bit(4) == 0) {
- // by immediate
- if ((shift == ROR) && (shift_amount == 0)) {
- UNIMPLEMENTED();
- return result;
- } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
- shift_amount = 32;
- }
- switch (shift) {
- case ASR: {
- if (shift_amount == 0) {
- if (result < 0) {
- result = 0xffffffff;
- *carry_out = true;
- } else {
- result = 0;
- *carry_out = false;
- }
- } else {
- result >>= (shift_amount - 1);
- *carry_out = (result & 1) == 1;
- result >>= 1;
- }
- break;
- }
-
- case LSL: {
- if (shift_amount == 0) {
- *carry_out = c_flag_;
- } else {
- result <<= (shift_amount - 1);
- *carry_out = (result < 0);
- result <<= 1;
- }
- break;
- }
-
- case LSR: {
- if (shift_amount == 0) {
- result = 0;
- *carry_out = c_flag_;
- } else {
- uint32_t uresult = static_cast<uint32_t>(result);
- uresult >>= (shift_amount - 1);
- *carry_out = (uresult & 1) == 1;
- uresult >>= 1;
- result = static_cast<int32_t>(uresult);
- }
- break;
- }
-
- case ROR: {
- if (shift_amount == 0) {
- *carry_out = c_flag_;
- } else {
- uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
- uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
- result = right | left;
- *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
- }
- break;
- }
-
- default: {
- UNREACHABLE();
- break;
- }
- }
- } else {
- // by register
- int rs = instr->RsValue();
-    shift_amount = get_register(rs) & 0xff;
- switch (shift) {
- case ASR: {
- if (shift_amount == 0) {
- *carry_out = c_flag_;
- } else if (shift_amount < 32) {
- result >>= (shift_amount - 1);
- *carry_out = (result & 1) == 1;
- result >>= 1;
- } else {
- ASSERT(shift_amount >= 32);
- if (result < 0) {
- *carry_out = true;
- result = 0xffffffff;
- } else {
- *carry_out = false;
- result = 0;
- }
- }
- break;
- }
-
- case LSL: {
- if (shift_amount == 0) {
- *carry_out = c_flag_;
- } else if (shift_amount < 32) {
- result <<= (shift_amount - 1);
- *carry_out = (result < 0);
- result <<= 1;
- } else if (shift_amount == 32) {
- *carry_out = (result & 1) == 1;
- result = 0;
- } else {
- ASSERT(shift_amount > 32);
- *carry_out = false;
- result = 0;
- }
- break;
- }
-
- case LSR: {
- if (shift_amount == 0) {
- *carry_out = c_flag_;
- } else if (shift_amount < 32) {
- uint32_t uresult = static_cast<uint32_t>(result);
- uresult >>= (shift_amount - 1);
- *carry_out = (uresult & 1) == 1;
- uresult >>= 1;
- result = static_cast<int32_t>(uresult);
- } else if (shift_amount == 32) {
- *carry_out = (result < 0);
- result = 0;
- } else {
- *carry_out = false;
- result = 0;
- }
- break;
- }
-
-      case ROR: {
-        if (shift_amount == 0) {
-          *carry_out = c_flag_;
-        } else {
-          // ROR by register uses the rotate amount modulo 32; shifting by
-          // the raw amount would be undefined behavior for amounts >= 32.
-          // A non-zero multiple of 32 leaves the value unchanged and takes
-          // the carry from bit 31.
-          uint32_t uresult = static_cast<uint32_t>(result);
-          int rot = shift_amount & 0x1f;
-          if (rot != 0) {
-            uresult = (uresult >> rot) | (uresult << (32 - rot));
-          }
-          result = static_cast<int32_t>(uresult);
-          *carry_out = (uresult >> 31) != 0;
-        }
-        break;
-      }
-
- default: {
- UNREACHABLE();
- break;
- }
- }
- }
- return result;
-}
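-
-// Worked example of the carry extraction above: for LSL #1 on 0x80000001 the
-// old bit 31 becomes the shifter carry (carry_out = true) and the result is
-// 0x00000002. Each shift is split as "shift by n-1, sample the edge bit,
-// shift by 1" to capture the last bit shifted out.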
-
-
-// Addressing Mode 1 - Data-processing operands:
-// Get the value based on the shifter_operand with immediate.
-int32_t Simulator::GetImm(Instruction* instr, bool* carry_out) {
- int rotate = instr->RotateValue() * 2;
- int immed8 = instr->Immed8Value();
-  // Guard the rotate == 0 case: a left shift by 32 would be undefined
-  // behavior.
-  int imm = (rotate == 0)
-      ? immed8
-      : ((immed8 >> rotate) | (immed8 << (32 - rotate)));
- *carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
- return imm;
-}
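-
-// ARM data-processing immediates encode an 8-bit value rotated right by
-// twice the 4-bit rotate field. Worked example: rotate field 2 means
-// "rotate right by 4", so immed8 = 0xff decodes to 0xf000000f. A standalone
-// sketch of the decoding (DecodeARMImmediate is illustrative):
-static inline uint32_t DecodeARMImmediate(uint32_t immed8,
-                                          uint32_t rotate_field) {
-  uint32_t r = rotate_field * 2;
-  if (r == 0) return immed8;
-  return (immed8 >> r) | (immed8 << (32 - r));
-}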
-
-
-static int count_bits(int bit_vector) {
- int count = 0;
- while (bit_vector != 0) {
- if ((bit_vector & 1) != 0) {
- count++;
- }
- bit_vector >>= 1;
- }
- return count;
-}
-
-
-void Simulator::ProcessPUW(Instruction* instr,
- int num_regs,
- int reg_size,
- intptr_t* start_address,
- intptr_t* end_address) {
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- switch (instr->PUField()) {
- case da_x: {
- UNIMPLEMENTED();
- break;
- }
- case ia_x: {
- *start_address = rn_val;
- *end_address = rn_val + (num_regs * reg_size) - reg_size;
- rn_val = rn_val + (num_regs * reg_size);
- break;
- }
- case db_x: {
- *start_address = rn_val - (num_regs * reg_size);
- *end_address = rn_val - reg_size;
- rn_val = *start_address;
- break;
- }
- case ib_x: {
- *start_address = rn_val + reg_size;
- *end_address = rn_val + (num_regs * reg_size);
- rn_val = *end_address;
- break;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
-}
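-
-// Worked example of the address math above, for base rn = 0x1000 and three
-// 4-byte registers:
-//   ia_x: range [0x1000, 0x1008], writeback leaves rn at 0x100c
-//   db_x: range [0x0ff4, 0x0ffc], writeback leaves rn at 0x0ff4
-//   ib_x: range [0x1004, 0x100c], writeback leaves rn at 0x100c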
-
-// Addressing Mode 4 - Load and Store Multiple
-void Simulator::HandleRList(Instruction* instr, bool load) {
- int rlist = instr->RlistValue();
- int num_regs = count_bits(rlist);
-
- intptr_t start_address = 0;
- intptr_t end_address = 0;
- ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
-
- intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
- // Catch null pointers a little earlier.
- ASSERT(start_address > 8191 || start_address < 0);
- int reg = 0;
- while (rlist != 0) {
- if ((rlist & 1) != 0) {
- if (load) {
- set_register(reg, *address);
- } else {
- *address = get_register(reg);
- }
- address += 1;
- }
- reg++;
- rlist >>= 1;
- }
-  ASSERT(end_address == reinterpret_cast<intptr_t>(address) - 4);
-}
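-
-// Example: for "ldmia r1, {r0, r2}" the register list is 0b0101, so the
-// loop above loads r0 from the first transferred word and r2 from the
-// second.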
-
-
-// Addressing Mode 6 - Load and Store Multiple Coprocessor registers.
-void Simulator::HandleVList(Instruction* instr) {
- VFPRegPrecision precision =
- (instr->SzValue() == 0) ? kSinglePrecision : kDoublePrecision;
- int operand_size = (precision == kSinglePrecision) ? 4 : 8;
-
- bool load = (instr->VLValue() == 0x1);
-
- int vd;
- int num_regs;
- vd = instr->VFPDRegValue(precision);
- if (precision == kSinglePrecision) {
- num_regs = instr->Immed8Value();
- } else {
- num_regs = instr->Immed8Value() / 2;
- }
-
- intptr_t start_address = 0;
- intptr_t end_address = 0;
- ProcessPUW(instr, num_regs, operand_size, &start_address, &end_address);
-
- intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
- for (int reg = vd; reg < vd + num_regs; reg++) {
- if (precision == kSinglePrecision) {
- if (load) {
- set_s_register_from_sinteger(
- reg, ReadW(reinterpret_cast<int32_t>(address), instr));
- } else {
- WriteW(reinterpret_cast<int32_t>(address),
- get_sinteger_from_s_register(reg), instr);
- }
- address += 1;
- } else {
- if (load) {
- int32_t data[] = {
- ReadW(reinterpret_cast<int32_t>(address), instr),
- ReadW(reinterpret_cast<int32_t>(address + 1), instr)
- };
- double d;
- memcpy(&d, data, 8);
- set_d_register_from_double(reg, d);
- } else {
- int32_t data[2];
- double d = get_double_from_d_register(reg);
- memcpy(data, &d, 8);
- WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
- WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
- }
- address += 2;
- }
- }
- ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
-}
-
-
-// Calls into the V8 runtime are based on this very simple interface.
-// Note: To be able to return two values from some calls the code in runtime.cc
-// uses the ObjectPair which is essentially two 32-bit values stuffed into a
-// 64-bit value. With the code below we assume that all runtime calls return
-// 64 bits of result. If they don't, the r1 result register contains a bogus
-// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3,
- int32_t arg4,
- int32_t arg5);
-typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3);
-
-// This signature supports direct call in to API function native callback
-// (refer to InvocationCallback in v8.h).
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-
-// This signature supports direct call to accessor getter callback.
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
- int32_t arg1);
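-
-// A sketch of the 64-bit result split described above: the low word of the
-// returned int64_t lands in r0 and the high word in r1 (SplitResult is
-// illustrative, not part of the simulator):
-static inline void SplitResult(int64_t result, int32_t* lo, int32_t* hi) {
-  *lo = static_cast<int32_t>(result);        // Goes to r0.
-  *hi = static_cast<int32_t>(result >> 32);  // Goes to r1.
-}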
-
-// Software interrupt instructions are used by the simulator to call into the
-// C-based V8 runtime.
-void Simulator::SoftwareInterrupt(Instruction* instr) {
- int svc = instr->SvcValue();
- switch (svc) {
- case kCallRtRedirected: {
- // Check if stack is aligned. Error if not aligned is reported below to
- // include information on the function called.
- bool stack_aligned =
- (get_register(sp)
- & (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
- Redirection* redirection = Redirection::FromSwiInstruction(instr);
- int32_t arg0 = get_register(r0);
- int32_t arg1 = get_register(r1);
- int32_t arg2 = get_register(r2);
- int32_t arg3 = get_register(r3);
- int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
- int32_t arg4 = stack_pointer[0];
- int32_t arg5 = stack_pointer[1];
- bool fp_call =
- (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
- if (use_eabi_hardfloat()) {
- // With the hard floating point calling convention, double
- // arguments are passed in VFP registers. Fetch the arguments
- // from there and call the builtin using soft floating point
- // convention.
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- arg2 = vfp_registers_[2];
- arg3 = vfp_registers_[3];
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- arg2 = get_register(0);
- break;
- default:
- break;
- }
- }
- // This is dodgy but it works because the C entry stubs are never moved.
- // See comment in codegen-arm.cc and bug 1242173.
- int32_t saved_lr = get_register(lr);
- intptr_t external =
- reinterpret_cast<intptr_t>(redirection->external_function());
- if (fp_call) {
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double dval0, dval1;
- int32_t ival;
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- GetFpArgs(&dval0, &dval1);
- PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(target), dval0, dval1);
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- GetFpArgs(&dval0);
- PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(target), dval0);
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- GetFpArgs(&dval0, &ival);
- PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(target), dval0, ival);
- break;
- default:
- UNREACHABLE();
- break;
- }
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double result = target(arg0, arg1, arg2, arg3);
- SetFpResult(result);
- } else {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
- }
- set_register(r0, lo_res);
- set_register(r1, hi_res);
- }
- } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
- SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF("Call to host function at %p args %08x",
- FUNCTION_ADDR(target), arg0);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- v8::Handle<v8::Value> result = target(arg0);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
- SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF("Call to host function at %p args %08x %08x",
- FUNCTION_ADDR(target), arg0, arg1);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- v8::Handle<v8::Value> result = target(arg0, arg1);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
- }
- set_register(r0, reinterpret_cast<int32_t>(*result));
- } else {
- // builtin call.
- ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- PrintF(
- "Call to host function at %p"
- "args %08x, %08x, %08x, %08x, %08x, %08x",
- FUNCTION_ADDR(target),
- arg0,
- arg1,
- arg2,
- arg3,
- arg4,
- arg5);
- if (!stack_aligned) {
- PrintF(" with unaligned stack %08x\n", get_register(sp));
- }
- PrintF("\n");
- }
- CHECK(stack_aligned);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
- }
- set_register(r0, lo_res);
- set_register(r1, hi_res);
- }
- set_register(lr, saved_lr);
- set_pc(get_register(lr));
- break;
- }
- case kBreakpoint: {
- ArmDebugger dbg(this);
- dbg.Debug();
- break;
- }
-    // Stop uses all codes at or above 1 << 23.
- default: {
- if (svc >= (1 << 23)) {
- uint32_t code = svc & kStopCodeMask;
- if (isWatchedStop(code)) {
- IncreaseStopCounter(code);
- }
- // Stop if it is enabled, otherwise go on jumping over the stop
- // and the message address.
- if (isEnabledStop(code)) {
- ArmDebugger dbg(this);
- dbg.Stop(instr);
- } else {
- set_pc(get_pc() + 2 * Instruction::kInstrSize);
- }
- } else {
- // This is not a valid svc code.
- UNREACHABLE();
- break;
- }
- }
- }
-}
-
-
-// Stop helper functions.
-bool Simulator::isStopInstruction(Instruction* instr) {
- return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
-}
-
-
-bool Simulator::isWatchedStop(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- return code < kNumOfWatchedStops;
-}
-
-
-bool Simulator::isEnabledStop(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- // Unwatched stops are always enabled.
- return !isWatchedStop(code) ||
- !(watched_stops[code].count & kStopDisabledBit);
-}
-
-
-void Simulator::EnableStop(uint32_t code) {
- ASSERT(isWatchedStop(code));
- if (!isEnabledStop(code)) {
- watched_stops[code].count &= ~kStopDisabledBit;
- }
-}
-
-
-void Simulator::DisableStop(uint32_t code) {
- ASSERT(isWatchedStop(code));
- if (isEnabledStop(code)) {
- watched_stops[code].count |= kStopDisabledBit;
- }
-}
-
-
-void Simulator::IncreaseStopCounter(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- ASSERT(isWatchedStop(code));
- if ((watched_stops[code].count & ~(1 << 31)) == 0x7fffffff) {
- PrintF("Stop counter for code %i has overflowed.\n"
- "Enabling this code and reseting the counter to 0.\n", code);
- watched_stops[code].count = 0;
- EnableStop(code);
- } else {
- watched_stops[code].count++;
- }
-}
-
-
-// Print a stop status.
-void Simulator::PrintStopInfo(uint32_t code) {
- ASSERT(code <= kMaxStopCode);
- if (!isWatchedStop(code)) {
- PrintF("Stop not watched.");
- } else {
- const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
- int32_t count = watched_stops[code].count & ~kStopDisabledBit;
- // Don't print the state of unused breakpoints.
- if (count != 0) {
- if (watched_stops[code].desc) {
- PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
- code, code, state, count, watched_stops[code].desc);
- } else {
- PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
- code, code, state, count);
- }
- }
- }
-}
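-
-// The stop bookkeeping above packs a "disabled" flag into the top bit of the
-// per-code counter. A standalone sketch of that packing (kDisabledBit here
-// mirrors kStopDisabledBit, assumed to be 1 << 31):
-struct StopCounterSketch {
-  static const uint32_t kDisabledBit = 1u << 31;
-  uint32_t count;  // Bit 31: disabled flag; bits 30..0: hit counter.
-
-  bool enabled() const { return (count & kDisabledBit) == 0; }
-  void disable() { count |= kDisabledBit; }
-  void enable() { count &= ~kDisabledBit; }
-  // Hits are counted even while disabled; the real code also resets the
-  // counter when it is about to overflow into the flag bit.
-  void hit() { count++; }
-};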
-
-
-// Handle execution based on instruction types.
-
-// Instruction types 0 and 1 are both rolled into one function because they
-// only differ in the handling of the shifter_operand.
-void Simulator::DecodeType01(Instruction* instr) {
- int type = instr->TypeValue();
- if ((type == 0) && instr->IsSpecialType0()) {
- // multiply instruction or extra loads and stores
- if (instr->Bits(7, 4) == 9) {
- if (instr->Bit(24) == 0) {
- // Raw field decoding here. Multiply instructions have their Rd in
- // funny places.
- int rn = instr->RnValue();
- int rm = instr->RmValue();
- int rs = instr->RsValue();
- int32_t rs_val = get_register(rs);
- int32_t rm_val = get_register(rm);
- if (instr->Bit(23) == 0) {
- if (instr->Bit(21) == 0) {
- // The MUL instruction description (A 4.1.33) refers to Rd as being
- // the destination for the operation, but it confusingly uses the
- // Rn field to encode it.
- // Format(instr, "mul'cond's 'rn, 'rm, 'rs");
- int rd = rn; // Remap the rn field to the Rd register.
- int32_t alu_out = rm_val * rs_val;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- }
- } else {
- int rd = instr->RdValue();
- int32_t acc_value = get_register(rd);
- if (instr->Bit(22) == 0) {
- // The MLA instruction description (A 4.1.28) refers to the order
- // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
- // Rn field to encode the Rd register and the Rd field to encode
- // the Rn register.
- // Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
- int32_t mul_out = rm_val * rs_val;
- int32_t result = acc_value + mul_out;
- set_register(rn, result);
- } else {
- // Format(instr, "mls'cond's 'rn, 'rm, 'rs, 'rd");
- int32_t mul_out = rm_val * rs_val;
- int32_t result = acc_value - mul_out;
- set_register(rn, result);
- }
- }
- } else {
- // The signed/long multiply instructions use the terms RdHi and RdLo
- // when referring to the target registers. They are mapped to the Rn
- // and Rd fields as follows:
- // RdLo == Rd
-        //  RdHi == Rn (confusingly stored in the rd_hi variable below,
-        //              because the mul instruction from above uses the
-        //              Rn field to encode the Rd register. Good luck figuring
-        //              this out without reading the ARM instruction manual
-        //              at a very detailed level.)
- // Format(instr, "'um'al'cond's 'rd, 'rn, 'rs, 'rm");
- int rd_hi = rn; // Remap the rn field to the RdHi register.
- int rd_lo = instr->RdValue();
- int32_t hi_res = 0;
- int32_t lo_res = 0;
- if (instr->Bit(22) == 1) {
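-          // signed multiply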
- int64_t left_op = static_cast<int32_t>(rm_val);
- int64_t right_op = static_cast<int32_t>(rs_val);
- uint64_t result = left_op * right_op;
- hi_res = static_cast<int32_t>(result >> 32);
- lo_res = static_cast<int32_t>(result & 0xffffffff);
- } else {
- // unsigned multiply
- uint64_t left_op = static_cast<uint32_t>(rm_val);
- uint64_t right_op = static_cast<uint32_t>(rs_val);
- uint64_t result = left_op * right_op;
- hi_res = static_cast<int32_t>(result >> 32);
- lo_res = static_cast<int32_t>(result & 0xffffffff);
- }
- set_register(rd_lo, lo_res);
- set_register(rd_hi, hi_res);
- if (instr->HasS()) {
- UNIMPLEMENTED();
- }
- }
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
- } else {
- // extra load/store instructions
- int rd = instr->RdValue();
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- int32_t addr = 0;
- if (instr->Bit(22) == 0) {
- int rm = instr->RmValue();
- int32_t rm_val = get_register(rm);
- switch (instr->PUField()) {
- case da_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val -= rm_val;
- set_register(rn, rn_val);
- break;
- }
- case ia_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val += rm_val;
- set_register(rn, rn_val);
- break;
- }
- case db_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
- rn_val -= rm_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- case ib_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
- rn_val += rm_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
- } else {
- int32_t imm_val = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
- switch (instr->PUField()) {
- case da_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val -= imm_val;
- set_register(rn, rn_val);
- break;
- }
- case ia_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val += imm_val;
- set_register(rn, rn_val);
- break;
- }
- case db_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
- rn_val -= imm_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- case ib_x: {
- // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
- rn_val += imm_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- default: {
- // The PU field is a 2-bit field.
- UNREACHABLE();
- break;
- }
- }
- }
- if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
- ASSERT((rd % 2) == 0);
- if (instr->HasH()) {
- // The strd instruction.
- int32_t value1 = get_register(rd);
- int32_t value2 = get_register(rd+1);
- WriteDW(addr, value1, value2);
- } else {
- // The ldrd instruction.
- int* rn_data = ReadDW(addr);
- set_dw_register(rd, rn_data);
- }
- } else if (instr->HasH()) {
- if (instr->HasSign()) {
- if (instr->HasL()) {
- int16_t val = ReadH(addr, instr);
- set_register(rd, val);
- } else {
- int16_t val = get_register(rd);
- WriteH(addr, val, instr);
- }
- } else {
- if (instr->HasL()) {
- uint16_t val = ReadHU(addr, instr);
- set_register(rd, val);
- } else {
- uint16_t val = get_register(rd);
- WriteH(addr, val, instr);
- }
- }
- } else {
- // signed byte loads
- ASSERT(instr->HasSign());
- ASSERT(instr->HasL());
- int8_t val = ReadB(addr);
- set_register(rd, val);
- }
- return;
- }
- } else if ((type == 0) && instr->IsMiscType0()) {
- if (instr->Bits(22, 21) == 1) {
- int rm = instr->RmValue();
- switch (instr->BitField(7, 4)) {
- case BX:
- set_pc(get_register(rm));
- break;
- case BLX: {
- uint32_t old_pc = get_pc();
- set_pc(get_register(rm));
- set_register(lr, old_pc + Instruction::kInstrSize);
- break;
- }
- case BKPT: {
- ArmDebugger dbg(this);
- PrintF("Simulator hit BKPT.\n");
- dbg.Debug();
- break;
- }
- default:
- UNIMPLEMENTED();
- }
- } else if (instr->Bits(22, 21) == 3) {
- int rm = instr->RmValue();
- int rd = instr->RdValue();
- switch (instr->BitField(7, 4)) {
- case CLZ: {
- uint32_t bits = get_register(rm);
- int leading_zeros = 0;
- if (bits == 0) {
- leading_zeros = 32;
- } else {
- while ((bits & 0x80000000u) == 0) {
- bits <<= 1;
- leading_zeros++;
- }
- }
- set_register(rd, leading_zeros);
- break;
- }
- default:
- UNIMPLEMENTED();
- }
- } else {
- PrintF("%08x\n", instr->InstructionBits());
- UNIMPLEMENTED();
- }
- } else if ((type == 1) && instr->IsNopType1()) {
- // NOP.
- } else {
- int rd = instr->RdValue();
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- int32_t shifter_operand = 0;
-    bool shifter_carry_out = false;
- if (type == 0) {
- shifter_operand = GetShiftRm(instr, &shifter_carry_out);
- } else {
- ASSERT(instr->TypeValue() == 1);
- shifter_operand = GetImm(instr, &shifter_carry_out);
- }
- int32_t alu_out;
-
- switch (instr->OpcodeField()) {
- case AND: {
- // Format(instr, "and'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "and'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val & shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case EOR: {
- // Format(instr, "eor'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "eor'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val ^ shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case SUB: {
- // Format(instr, "sub'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "sub'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val - shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(!BorrowFrom(rn_val, shifter_operand));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
- }
- break;
- }
-
- case RSB: {
- // Format(instr, "rsb'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "rsb'cond's 'rd, 'rn, 'imm");
- alu_out = shifter_operand - rn_val;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(!BorrowFrom(shifter_operand, rn_val));
- SetVFlag(OverflowFrom(alu_out, shifter_operand, rn_val, false));
- }
- break;
- }
-
- case ADD: {
- // Format(instr, "add'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "add'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val + shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(CarryFrom(rn_val, shifter_operand));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
- }
- break;
- }
-
- case ADC: {
- // Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "adc'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val + shifter_operand + GetCarry();
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(CarryFrom(rn_val, shifter_operand, GetCarry()));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
- }
- break;
- }
-
- case SBC: {
- Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
- Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
- break;
- }
-
- case RSC: {
- Format(instr, "rsc'cond's 'rd, 'rn, 'shift_rm");
- Format(instr, "rsc'cond's 'rd, 'rn, 'imm");
- break;
- }
-
- case TST: {
- if (instr->HasS()) {
- // Format(instr, "tst'cond 'rn, 'shift_rm");
- // Format(instr, "tst'cond 'rn, 'imm");
- alu_out = rn_val & shifter_operand;
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- } else {
- // Format(instr, "movw'cond 'rd, 'imm").
- alu_out = instr->ImmedMovwMovtValue();
- set_register(rd, alu_out);
- }
- break;
- }
-
- case TEQ: {
- if (instr->HasS()) {
- // Format(instr, "teq'cond 'rn, 'shift_rm");
- // Format(instr, "teq'cond 'rn, 'imm");
- alu_out = rn_val ^ shifter_operand;
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- } else {
- // Other instructions matching this pattern are handled in the
- // miscellaneous instructions part above.
- UNREACHABLE();
- }
- break;
- }
-
- case CMP: {
- if (instr->HasS()) {
- // Format(instr, "cmp'cond 'rn, 'shift_rm");
- // Format(instr, "cmp'cond 'rn, 'imm");
- alu_out = rn_val - shifter_operand;
- SetNZFlags(alu_out);
- SetCFlag(!BorrowFrom(rn_val, shifter_operand));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
- } else {
- // Format(instr, "movt'cond 'rd, 'imm").
- alu_out = (get_register(rd) & 0xffff) |
- (instr->ImmedMovwMovtValue() << 16);
- set_register(rd, alu_out);
- }
- break;
- }
-
- case CMN: {
- if (instr->HasS()) {
- // Format(instr, "cmn'cond 'rn, 'shift_rm");
- // Format(instr, "cmn'cond 'rn, 'imm");
- alu_out = rn_val + shifter_operand;
- SetNZFlags(alu_out);
- SetCFlag(CarryFrom(rn_val, shifter_operand));
- SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
- } else {
- // Other instructions matching this pattern are handled in the
- // miscellaneous instructions part above.
- UNREACHABLE();
- }
- break;
- }
-
- case ORR: {
- // Format(instr, "orr'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "orr'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val | shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case MOV: {
- // Format(instr, "mov'cond's 'rd, 'shift_rm");
- // Format(instr, "mov'cond's 'rd, 'imm");
- alu_out = shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case BIC: {
- // Format(instr, "bic'cond's 'rd, 'rn, 'shift_rm");
- // Format(instr, "bic'cond's 'rd, 'rn, 'imm");
- alu_out = rn_val & ~shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- case MVN: {
- // Format(instr, "mvn'cond's 'rd, 'shift_rm");
- // Format(instr, "mvn'cond's 'rd, 'imm");
- alu_out = ~shifter_operand;
- set_register(rd, alu_out);
- if (instr->HasS()) {
- SetNZFlags(alu_out);
- SetCFlag(shifter_carry_out);
- }
- break;
- }
-
- default: {
- UNREACHABLE();
- break;
- }
- }
- }
-}
-
-
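-// Immediate-offset loads and stores decode the P and U bits into four
-// addressing modes: da/ia post-index (access at rn, then write rn -/+
-// off12 back) and db/ib pre-index (access at rn -/+ off12, writing back
-// only when the W bit is set). For example, with rn = 0x1000 and
-// off12 = 8, db_x accesses 0x0ff8 while ia_x accesses 0x1000 and leaves
-// rn = 0x1008.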
-void Simulator::DecodeType2(Instruction* instr) {
- int rd = instr->RdValue();
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- int32_t im_val = instr->Offset12Value();
- int32_t addr = 0;
- switch (instr->PUField()) {
- case da_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val -= im_val;
- set_register(rn, rn_val);
- break;
- }
- case ia_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
- ASSERT(!instr->HasW());
- addr = rn_val;
- rn_val += im_val;
- set_register(rn, rn_val);
- break;
- }
- case db_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
- rn_val -= im_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- case ib_x: {
- // Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
- rn_val += im_val;
- addr = rn_val;
- if (instr->HasW()) {
- set_register(rn, rn_val);
- }
- break;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
- if (instr->HasB()) {
- if (instr->HasL()) {
- byte val = ReadBU(addr);
- set_register(rd, val);
- } else {
- byte val = get_register(rd);
- WriteB(addr, val);
- }
- } else {
- if (instr->HasL()) {
- set_register(rd, ReadW(addr, instr));
- } else {
- WriteW(addr, get_register(rd), instr);
- }
- }
-}
-
-
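-// Type 3 covers register-offset loads and stores plus several media
-// encodings multiplexed onto the same space (usat, ubfx/sbfx, bfc/bfi
-// and, behind FLAG_enable_sudiv, sdiv). The bitfield extracts below use
-// a shift pair: e.g. ubfx with lsb = 4 and width = 8 applied to
-// 0x0000abcd computes (0xabcd << 20) >> 24 = 0xbc.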
-void Simulator::DecodeType3(Instruction* instr) {
- int rd = instr->RdValue();
- int rn = instr->RnValue();
- int32_t rn_val = get_register(rn);
- bool shifter_carry_out = false;
- int32_t shifter_operand = GetShiftRm(instr, &shifter_carry_out);
- int32_t addr = 0;
- switch (instr->PUField()) {
- case da_x: {
- ASSERT(!instr->HasW());
- Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
- UNIMPLEMENTED();
- break;
- }
- case ia_x: {
- if (instr->HasW()) {
- ASSERT(instr->Bits(5, 4) == 0x1);
-
- if (instr->Bit(22) == 0x1) { // USAT.
- int32_t sat_pos = instr->Bits(20, 16);
- int32_t sat_val = (1 << sat_pos) - 1;
- int32_t shift = instr->Bits(11, 7);
- int32_t shift_type = instr->Bit(6);
- int32_t rm_val = get_register(instr->RmValue());
- if (shift_type == 0) { // LSL
- rm_val <<= shift;
- } else { // ASR
- rm_val >>= shift;
- }
- // If saturation occurs, the Q flag should be set in the CPSR.
- // There is no Q flag yet, and no instruction (MRS) to read the
- // CPSR directly.
- if (rm_val > sat_val) {
- rm_val = sat_val;
- } else if (rm_val < 0) {
- rm_val = 0;
- }
- set_register(rd, rm_val);
- } else { // SSAT.
- UNIMPLEMENTED();
- }
- return;
- } else {
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
- UNIMPLEMENTED();
- }
- break;
- }
- case db_x: {
- if (FLAG_enable_sudiv) {
- if (!instr->HasW()) {
- if (instr->Bits(5, 4) == 0x1) {
- if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
- // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs
- // Format(instr, "'sdiv'cond'b 'rn, 'rm, 'rs);
- int rm = instr->RmValue();
- int32_t rm_val = get_register(rm);
- int rs = instr->RsValue();
- int32_t rs_val = get_register(rs);
- int32_t ret_val = 0;
- ASSERT(rs_val != 0);
- ret_val = rm_val/rs_val;
- set_register(rn, ret_val);
- return;
- }
- }
- }
- }
- // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
- addr = rn_val - shifter_operand;
- if (instr->HasW()) {
- set_register(rn, addr);
- }
- break;
- }
- case ib_x: {
- if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
- uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
- uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
- uint32_t msbit = widthminus1 + lsbit;
- if (msbit <= 31) {
- if (instr->Bit(22)) {
- // ubfx - unsigned bitfield extract.
- uint32_t rm_val =
- static_cast<uint32_t>(get_register(instr->RmValue()));
- uint32_t extr_val = rm_val << (31 - msbit);
- extr_val = extr_val >> (31 - widthminus1);
- set_register(instr->RdValue(), extr_val);
- } else {
- // sbfx - signed bitfield extract.
- int32_t rm_val = get_register(instr->RmValue());
- int32_t extr_val = rm_val << (31 - msbit);
- extr_val = extr_val >> (31 - widthminus1);
- set_register(instr->RdValue(), extr_val);
- }
- } else {
- UNREACHABLE();
- }
- return;
- } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
- uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
- uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
- if (msbit >= lsbit) {
- // bfc or bfi - bitfield clear/insert.
- uint32_t rd_val =
- static_cast<uint32_t>(get_register(instr->RdValue()));
- uint32_t bitcount = msbit - lsbit + 1;
- uint32_t mask = (1 << bitcount) - 1;
- rd_val &= ~(mask << lsbit);
- if (instr->RmValue() != 15) {
- // bfi - bitfield insert.
- uint32_t rm_val =
- static_cast<uint32_t>(get_register(instr->RmValue()));
- rm_val &= mask;
- rd_val |= rm_val << lsbit;
- }
- set_register(instr->RdValue(), rd_val);
- } else {
- UNREACHABLE();
- }
- return;
- } else {
- // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
- addr = rn_val + shifter_operand;
- if (instr->HasW()) {
- set_register(rn, addr);
- }
- }
- break;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
- if (instr->HasB()) {
- if (instr->HasL()) {
- uint8_t byte = ReadB(addr);
- set_register(rd, byte);
- } else {
- uint8_t byte = get_register(rd);
- WriteB(addr, byte);
- }
- } else {
- if (instr->HasL()) {
- set_register(rd, ReadW(addr, instr));
- } else {
- WriteW(addr, get_register(rd), instr);
- }
- }
-}
-
-
-void Simulator::DecodeType4(Instruction* instr) {
- ASSERT(instr->Bit(22) == 0); // only allowed to be set in privileged mode
- if (instr->HasL()) {
- // Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
- HandleRList(instr, true);
- } else {
- // Format(instr, "stm'cond'pu 'rn'w, 'rlist");
- HandleRList(instr, false);
- }
-}
-
-
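-// Branch targets are computed from the architectural PC, which reads as
-// the branch address plus 8: target = pc + 8 + (simm24 << 2). A branch
-// with simm24 = -2 therefore jumps to itself.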
-void Simulator::DecodeType5(Instruction* instr) {
- // Format(instr, "b'l'cond 'target");
- int off = (instr->SImmed24Value() << 2);
- intptr_t pc_address = get_pc();
- if (instr->HasLink()) {
- set_register(lr, pc_address + Instruction::kInstrSize);
- }
- int pc_reg = get_register(pc);
- set_pc(pc_reg + off);
-}
-
-
-void Simulator::DecodeType6(Instruction* instr) {
- DecodeType6CoprocessorIns(instr);
-}
-
-
-void Simulator::DecodeType7(Instruction* instr) {
- if (instr->Bit(24) == 1) {
- SoftwareInterrupt(instr);
- } else {
- DecodeTypeVFP(instr);
- }
-}
-
-
-// void Simulator::DecodeTypeVFP(Instruction* instr)
-// The following ARMv7 VFP instructions are currently supported.
-// vmov :Sn = Rt
-// vmov :Rt = Sn
-// vcvt: Dd = Sm
-// vcvt: Sd = Dm
-// Dd = vabs(Dm)
-// Dd = vneg(Dm)
-// Dd = vadd(Dn, Dm)
-// Dd = vsub(Dn, Dm)
-// Dd = vmul(Dn, Dm)
-// Dd = vdiv(Dn, Dm)
-// vcmp(Dd, Dm)
-// vmrs
-// Dd = vsqrt(Dm)
-void Simulator::DecodeTypeVFP(Instruction* instr) {
- ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
- ASSERT(instr->Bits(11, 9) == 0x5);
-
- // Obtain double precision register codes.
- int vm = instr->VFPMRegValue(kDoublePrecision);
- int vd = instr->VFPDRegValue(kDoublePrecision);
- int vn = instr->VFPNRegValue(kDoublePrecision);
-
- if (instr->Bit(4) == 0) {
- if (instr->Opc1Value() == 0x7) {
- // Other data processing instructions
- if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
- // vmov register to register.
- if (instr->SzValue() == 0x1) {
- int m = instr->VFPMRegValue(kDoublePrecision);
- int d = instr->VFPDRegValue(kDoublePrecision);
- set_d_register_from_double(d, get_double_from_d_register(m));
- } else {
- int m = instr->VFPMRegValue(kSinglePrecision);
- int d = instr->VFPDRegValue(kSinglePrecision);
- set_s_register_from_float(d, get_float_from_s_register(m));
- }
- } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
- // vabs
- double dm_value = get_double_from_d_register(vm);
- double dd_value = fabs(dm_value);
- set_d_register_from_double(vd, dd_value);
- } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
- // vneg
- double dm_value = get_double_from_d_register(vm);
- double dd_value = -dm_value;
- set_d_register_from_double(vd, dd_value);
- } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
- DecodeVCVTBetweenDoubleAndSingle(instr);
- } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
- DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Value() >> 1) == 0x6) &&
- (instr->Opc3Value() & 0x1)) {
- DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
- (instr->Opc3Value() & 0x1)) {
- DecodeVCMP(instr);
- } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
- // vsqrt
- double dm_value = get_double_from_d_register(vm);
- double dd_value = sqrt(dm_value);
- set_d_register_from_double(vd, dd_value);
- } else if (instr->Opc3Value() == 0x0) {
- // vmov immediate.
- if (instr->SzValue() == 0x1) {
- set_d_register_from_double(vd, instr->DoubleImmedVmov());
- } else {
- UNREACHABLE(); // Not used by v8.
- }
- } else {
- UNREACHABLE(); // Not used by V8.
- }
- } else if (instr->Opc1Value() == 0x3) {
- if (instr->SzValue() != 0x1) {
- UNREACHABLE(); // Not used by V8.
- }
-
- if (instr->Opc3Value() & 0x1) {
- // vsub
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = dn_value - dm_value;
- set_d_register_from_double(vd, dd_value);
- } else {
- // vadd
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = dn_value + dm_value;
- set_d_register_from_double(vd, dd_value);
- }
- } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
- // vmul
- if (instr->SzValue() != 0x1) {
- UNREACHABLE(); // Not used by V8.
- }
-
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = dn_value * dm_value;
- set_d_register_from_double(vd, dd_value);
- } else if ((instr->Opc1Value() == 0x0)) {
- // vmla, vmls
- const bool is_vmls = (instr->Opc3Value() & 0x1);
-
- if (instr->SzValue() != 0x1) {
- UNREACHABLE(); // Not used by V8.
- }
-
- const double dd_val = get_double_from_d_register(vd);
- const double dn_val = get_double_from_d_register(vn);
- const double dm_val = get_double_from_d_register(vm);
-
- // Note: we do the mul and add/sub in separate steps so the intermediate
- // product is rounded, avoiding the extra precision of a fused
- // multiply-add.
- set_d_register_from_double(vd, dn_val * dm_val);
- if (is_vmls) {
- set_d_register_from_double(vd, dd_val - get_double_from_d_register(vd));
- } else {
- set_d_register_from_double(vd, dd_val + get_double_from_d_register(vd));
- }
- } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
- // vdiv
- if (instr->SzValue() != 0x1) {
- UNREACHABLE(); // Not used by V8.
- }
-
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = dn_value / dm_value;
- div_zero_vfp_flag_ = (dm_value == 0);
- set_d_register_from_double(vd, dd_value);
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
- } else {
- if ((instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0)) {
- DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
- } else if ((instr->VLValue() == 0x0) &&
- (instr->VCValue() == 0x1) &&
- (instr->Bit(23) == 0x0)) {
- // vmov (ARM core register to scalar)
- int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
- double dd_value = get_double_from_d_register(vd);
- int32_t data[2];
- memcpy(data, &dd_value, 8);
- data[instr->Bit(21)] = get_register(instr->RtValue());
- memcpy(&dd_value, data, 8);
- set_d_register_from_double(vd, dd_value);
- } else if ((instr->VLValue() == 0x1) &&
- (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x7) &&
- (instr->Bits(19, 16) == 0x1)) {
- // vmrs
- uint32_t rt = instr->RtValue();
- if (rt == 0xF) {
- Copy_FPSCR_to_APSR();
- } else {
- // Emulate FPSCR from the Simulator flags.
- uint32_t fpscr = (n_flag_FPSCR_ << 31) |
- (z_flag_FPSCR_ << 30) |
- (c_flag_FPSCR_ << 29) |
- (v_flag_FPSCR_ << 28) |
- (inexact_vfp_flag_ << 4) |
- (underflow_vfp_flag_ << 3) |
- (overflow_vfp_flag_ << 2) |
- (div_zero_vfp_flag_ << 1) |
- (inv_op_vfp_flag_ << 0) |
- (FPSCR_rounding_mode_);
- set_register(rt, fpscr);
- }
- } else if ((instr->VLValue() == 0x0) &&
- (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x7) &&
- (instr->Bits(19, 16) == 0x1)) {
- // vmsr
- uint32_t rt = instr->RtValue();
- if (rt == pc) {
- UNREACHABLE();
- } else {
- uint32_t rt_value = get_register(rt);
- n_flag_FPSCR_ = (rt_value >> 31) & 1;
- z_flag_FPSCR_ = (rt_value >> 30) & 1;
- c_flag_FPSCR_ = (rt_value >> 29) & 1;
- v_flag_FPSCR_ = (rt_value >> 28) & 1;
- inexact_vfp_flag_ = (rt_value >> 4) & 1;
- underflow_vfp_flag_ = (rt_value >> 3) & 1;
- overflow_vfp_flag_ = (rt_value >> 2) & 1;
- div_zero_vfp_flag_ = (rt_value >> 1) & 1;
- inv_op_vfp_flag_ = (rt_value >> 0) & 1;
- FPSCR_rounding_mode_ =
- static_cast<VFPRoundingMode>((rt_value) & kVFPRoundingModeMask);
- }
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
- }
-}
-
-
-void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
- Instruction* instr) {
- ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0));
-
- int t = instr->RtValue();
- int n = instr->VFPNRegValue(kSinglePrecision);
- bool to_arm_register = (instr->VLValue() == 0x1);
-
- if (to_arm_register) {
- int32_t int_value = get_sinteger_from_s_register(n);
- set_register(t, int_value);
- } else {
- int32_t rs_val = get_register(t);
- set_s_register_from_sinteger(n, rs_val);
- }
-}
-
-
-void Simulator::DecodeVCMP(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
- (instr->Opc3Value() & 0x1));
- // Comparison.
-
- VFPRegPrecision precision = kSinglePrecision;
- if (instr->SzValue() == 1) {
- precision = kDoublePrecision;
- }
-
- int d = instr->VFPDRegValue(precision);
- int m = 0;
- if (instr->Opc2Value() == 0x4) {
- m = instr->VFPMRegValue(precision);
- }
-
- if (precision == kDoublePrecision) {
- double dd_value = get_double_from_d_register(d);
- double dm_value = 0.0;
- if (instr->Opc2Value() == 0x4) {
- dm_value = get_double_from_d_register(m);
- }
-
- // Raise exceptions for quiet NaNs if necessary.
- if (instr->Bit(7) == 1) {
- if (isnan(dd_value)) {
- inv_op_vfp_flag_ = true;
- }
- }
-
- Compute_FPSCR_Flags(dd_value, dm_value);
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
-}
-
-
-void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
-
- VFPRegPrecision dst_precision = kDoublePrecision;
- VFPRegPrecision src_precision = kSinglePrecision;
- if (instr->SzValue() == 1) {
- dst_precision = kSinglePrecision;
- src_precision = kDoublePrecision;
- }
-
- int dst = instr->VFPDRegValue(dst_precision);
- int src = instr->VFPMRegValue(src_precision);
-
- if (dst_precision == kSinglePrecision) {
- double val = get_double_from_d_register(src);
- set_s_register_from_float(dst, static_cast<float>(val));
- } else {
- float val = get_float_from_s_register(src);
- set_d_register_from_double(dst, static_cast<double>(val));
- }
-}
-
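-// Returns true if converting val to a 32-bit integer under the given
-// rounding mode would be an invalid VFP operation (NaN or out of range).
-// E.g. for RN/unsigned, 4294967295.4 still rounds to 0xffffffff, but
-// 4294967295.5 is flagged invalid and the result saturates.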
-bool get_inv_op_vfp_flag(VFPRoundingMode mode,
- double val,
- bool unsigned_) {
- ASSERT((mode == RN) || (mode == RM) || (mode == RZ));
- double max_uint = static_cast<double>(0xffffffffu);
- double max_int = static_cast<double>(kMaxInt);
- double min_int = static_cast<double>(kMinInt);
-
- // Check for NaN.
- if (val != val) {
- return true;
- }
-
- // Check for overflow. This code works because 32-bit integers can be
- // exactly represented by IEEE-754 64-bit floating-point values.
- switch (mode) {
- case RN:
- return unsigned_ ? (val >= (max_uint + 0.5)) ||
- (val < -0.5)
- : (val >= (max_int + 0.5)) ||
- (val < (min_int - 0.5));
-
- case RM:
- return unsigned_ ? (val >= (max_uint + 1.0)) ||
- (val < 0)
- : (val >= (max_int + 1.0)) ||
- (val < min_int);
-
- case RZ:
- return unsigned_ ? (val >= (max_uint + 1.0)) ||
- (val <= -1)
- : (val >= (max_int + 1.0)) ||
- (val <= (min_int - 1.0));
- default:
- UNREACHABLE();
- return true;
- }
-}
-
-
-// We call this function only if we had a vfp invalid exception.
-// It returns the correct saturated value.
-int VFPConversionSaturate(double val, bool unsigned_res) {
- if (val != val) {
- return 0;
- } else {
- if (unsigned_res) {
- return (val < 0) ? 0 : 0xffffffffu;
- } else {
- return (val < 0) ? kMinInt : kMaxInt;
- }
- }
-}
-
-
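-// The float-to-integer path below starts from the C++ truncation toward
-// zero and then adjusts for the active rounding mode. E.g. under RN,
-// 2.5 truncates to 2 with abs_diff == 0.5 and stays 2 (round to even),
-// while 3.5 truncates to 3 and is bumped to 4.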
-void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
- (instr->Bits(27, 23) == 0x1D));
- ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
- (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
-
- // Conversion between floating-point and integer.
- bool to_integer = (instr->Bit(18) == 1);
-
- VFPRegPrecision src_precision = (instr->SzValue() == 1) ? kDoublePrecision
- : kSinglePrecision;
-
- if (to_integer) {
- // We are playing with code close to the C++ standard's limits below,
- // hence the very simple code and heavy checks.
- //
- // Note:
- // C++ defines default type casting from floating point to integer as
- // (close to) rounding toward zero ("fractional part discarded").
-
- int dst = instr->VFPDRegValue(kSinglePrecision);
- int src = instr->VFPMRegValue(src_precision);
-
- // Bit 7 in vcvt instructions indicates if we should use the FPSCR rounding
- // mode or the default Round to Zero mode.
- VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
- : RZ;
- ASSERT((mode == RM) || (mode == RZ) || (mode == RN));
-
- bool unsigned_integer = (instr->Bit(16) == 0);
- bool double_precision = (src_precision == kDoublePrecision);
-
- double val = double_precision ? get_double_from_d_register(src)
- : get_float_from_s_register(src);
-
- int temp = unsigned_integer ? static_cast<uint32_t>(val)
- : static_cast<int32_t>(val);
-
- inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
-
- double abs_diff =
- unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
- : fabs(val - temp);
-
- inexact_vfp_flag_ = (abs_diff != 0);
-
- if (inv_op_vfp_flag_) {
- temp = VFPConversionSaturate(val, unsigned_integer);
- } else {
- switch (mode) {
- case RN: {
- int val_sign = (val > 0) ? 1 : -1;
- if (abs_diff > 0.5) {
- temp += val_sign;
- } else if (abs_diff == 0.5) {
- // Round to even if exactly halfway.
- temp = ((temp % 2) == 0) ? temp : temp + val_sign;
- }
- break;
- }
-
- case RM:
- temp = temp > val ? temp - 1 : temp;
- break;
-
- case RZ:
- // Nothing to do.
- break;
-
- default:
- UNREACHABLE();
- }
- }
-
- // Update the destination register.
- set_s_register_from_sinteger(dst, temp);
-
- } else {
- bool unsigned_integer = (instr->Bit(7) == 0);
-
- int dst = instr->VFPDRegValue(src_precision);
- int src = instr->VFPMRegValue(kSinglePrecision);
-
- int val = get_sinteger_from_s_register(src);
-
- if (src_precision == kDoublePrecision) {
- if (unsigned_integer) {
- set_d_register_from_double(
- dst, static_cast<double>(static_cast<uint32_t>(val)));
- } else {
- set_d_register_from_double(dst, static_cast<double>(val));
- }
- } else {
- if (unsigned_integer) {
- set_s_register_from_float(
- dst, static_cast<float>(static_cast<uint32_t>(val)));
- } else {
- set_s_register_from_float(dst, static_cast<float>(val));
- }
- }
- }
-}
-
-
-// void Simulator::DecodeType6CoprocessorIns(Instruction* instr)
-// Decode Type 6 coprocessor instructions.
-// Dm = vmov(Rt, Rt2)
-// <Rt, Rt2> = vmov(Dm)
-// Ddst = MEM(Rbase + 4*offset).
-// MEM(Rbase + 4*offset) = Dsrc.
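-// The 8-bit immediate is a word offset, so e.g. a vldr with offset 2 and
-// the U bit set loads from Rbase + 8.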
-void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
- ASSERT((instr->TypeValue() == 6));
-
- if (instr->CoprocessorValue() == 0xA) {
- switch (instr->OpcodeValue()) {
- case 0x8:
- case 0xA:
- case 0xC:
- case 0xE: { // Load and store single precision float to memory.
- int rn = instr->RnValue();
- int vd = instr->VFPDRegValue(kSinglePrecision);
- int offset = instr->Immed8Value();
- if (!instr->HasU()) {
- offset = -offset;
- }
-
- int32_t address = get_register(rn) + 4 * offset;
- if (instr->HasL()) {
- // Load single precision value from memory: vldr.
- set_s_register_from_sinteger(vd, ReadW(address, instr));
- } else {
- // Store single precision value to memory: vstr.
- WriteW(address, get_sinteger_from_s_register(vd), instr);
- }
- break;
- }
- case 0x4:
- case 0x5:
- case 0x6:
- case 0x7:
- case 0x9:
- case 0xB:
- // Load/store multiple single from memory: vldm/vstm.
- HandleVList(instr);
- break;
- default:
- UNIMPLEMENTED(); // Not used by V8.
- }
- } else if (instr->CoprocessorValue() == 0xB) {
- switch (instr->OpcodeValue()) {
- case 0x2:
- // Load and store double to two GP registers
- if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
- UNIMPLEMENTED(); // Not used by V8.
- } else {
- int rt = instr->RtValue();
- int rn = instr->RnValue();
- int vm = instr->VFPMRegValue(kDoublePrecision);
- if (instr->HasL()) {
- int32_t data[2];
- double d = get_double_from_d_register(vm);
- memcpy(data, &d, 8);
- set_register(rt, data[0]);
- set_register(rn, data[1]);
- } else {
- int32_t data[] = { get_register(rt), get_register(rn) };
- double d;
- memcpy(&d, data, 8);
- set_d_register_from_double(vm, d);
- }
- }
- break;
- case 0x8:
- case 0xA:
- case 0xC:
- case 0xE: { // Load and store double to memory.
- int rn = instr->RnValue();
- int vd = instr->VFPDRegValue(kDoublePrecision);
- int offset = instr->Immed8Value();
- if (!instr->HasU()) {
- offset = -offset;
- }
- int32_t address = get_register(rn) + 4 * offset;
- if (instr->HasL()) {
- // Load double from memory: vldr.
- int32_t data[] = {
- ReadW(address, instr),
- ReadW(address + 4, instr)
- };
- double val;
- memcpy(&val, data, 8);
- set_d_register_from_double(vd, val);
- } else {
- // Store double to memory: vstr.
- int32_t data[2];
- double val = get_double_from_d_register(vd);
- memcpy(data, &val, 8);
- WriteW(address, data[0], instr);
- WriteW(address + 4, data[1], instr);
- }
- break;
- }
- case 0x4:
- case 0x5:
- case 0x6:
- case 0x7:
- case 0x9:
- case 0xB:
- // Load/store multiple double from memory: vldm/vstm.
- HandleVList(instr);
- break;
- default:
- UNIMPLEMENTED(); // Not used by V8.
- }
- } else {
- UNIMPLEMENTED(); // Not used by V8.
- }
-}
-
-
-// Executes the current instruction.
-void Simulator::InstructionDecode(Instruction* instr) {
- if (v8::internal::FLAG_check_icache) {
- CheckICache(isolate_->simulator_i_cache(), instr);
- }
- pc_modified_ = false;
- if (::v8::internal::FLAG_trace_sim) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // Use a reasonably large buffer.
- v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(instr));
- PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
- }
- if (instr->ConditionField() == kSpecialCondition) {
- UNIMPLEMENTED();
- } else if (ConditionallyExecute(instr)) {
- switch (instr->TypeValue()) {
- case 0:
- case 1: {
- DecodeType01(instr);
- break;
- }
- case 2: {
- DecodeType2(instr);
- break;
- }
- case 3: {
- DecodeType3(instr);
- break;
- }
- case 4: {
- DecodeType4(instr);
- break;
- }
- case 5: {
- DecodeType5(instr);
- break;
- }
- case 6: {
- DecodeType6(instr);
- break;
- }
- case 7: {
- DecodeType7(instr);
- break;
- }
- default: {
- UNIMPLEMENTED();
- break;
- }
- }
- // If the instruction is a non-taken conditional stop, we need to skip the
- // inlined message address.
- } else if (instr->IsStop()) {
- set_pc(get_pc() + 2 * Instruction::kInstrSize);
- }
- if (!pc_modified_) {
- set_register(pc, reinterpret_cast<int32_t>(instr)
- + Instruction::kInstrSize);
- }
-}
-
-
-void Simulator::Execute() {
- // Get the PC to simulate. Cannot use the accessor here as we need the
- // raw PC value and not the one used as input to arithmetic instructions.
- int program_counter = get_pc();
-
- if (::v8::internal::FLAG_stop_sim_at == 0) {
- // Fast version of the dispatch loop without checking whether the simulator
- // should be stopping at a particular executed instruction.
- while (program_counter != end_sim_pc) {
- Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
- InstructionDecode(instr);
- program_counter = get_pc();
- }
- } else {
- // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
- // we reach the particular instruction count.
- while (program_counter != end_sim_pc) {
- Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
- if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
- ArmDebugger dbg(this);
- dbg.Debug();
- } else {
- InstructionDecode(instr);
- }
- program_counter = get_pc();
- }
- }
-}
-
-
-void Simulator::CallInternal(byte* entry) {
- // Prepare to execute the code at entry
- set_register(pc, reinterpret_cast<int32_t>(entry));
- // Put down marker for end of simulation. The simulator will stop simulation
- // when the PC reaches this value. By saving the "end simulation" value into
- // the LR the simulation stops when returning to this call point.
- set_register(lr, end_sim_pc);
-
- // Remember the values of callee-saved registers.
- // The code below assumes that r9 is not used as sb (static base) in
- // simulator code and therefore is regarded as a callee-saved register.
- int32_t r4_val = get_register(r4);
- int32_t r5_val = get_register(r5);
- int32_t r6_val = get_register(r6);
- int32_t r7_val = get_register(r7);
- int32_t r8_val = get_register(r8);
- int32_t r9_val = get_register(r9);
- int32_t r10_val = get_register(r10);
- int32_t r11_val = get_register(r11);
-
- // Set up the callee-saved registers with a known value, so that we can
- // check that they are preserved properly across JS execution.
- int32_t callee_saved_value = icount_;
- set_register(r4, callee_saved_value);
- set_register(r5, callee_saved_value);
- set_register(r6, callee_saved_value);
- set_register(r7, callee_saved_value);
- set_register(r8, callee_saved_value);
- set_register(r9, callee_saved_value);
- set_register(r10, callee_saved_value);
- set_register(r11, callee_saved_value);
-
- // Start the simulation
- Execute();
-
- // Check that the callee-saved registers have been preserved.
- CHECK_EQ(callee_saved_value, get_register(r4));
- CHECK_EQ(callee_saved_value, get_register(r5));
- CHECK_EQ(callee_saved_value, get_register(r6));
- CHECK_EQ(callee_saved_value, get_register(r7));
- CHECK_EQ(callee_saved_value, get_register(r8));
- CHECK_EQ(callee_saved_value, get_register(r9));
- CHECK_EQ(callee_saved_value, get_register(r10));
- CHECK_EQ(callee_saved_value, get_register(r11));
-
- // Restore callee-saved registers with the original value.
- set_register(r4, r4_val);
- set_register(r5, r5_val);
- set_register(r6, r6_val);
- set_register(r7, r7_val);
- set_register(r8, r8_val);
- set_register(r9, r9_val);
- set_register(r10, r10_val);
- set_register(r11, r11_val);
-}
-
-
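-// Sets up a call into generated code following the ARM calling
-// convention: the first four int-sized arguments go in r0-r3 and the
-// rest are stored on the (aligned) stack. E.g. Call(entry, 5, p0, p1,
-// p2, p3, p4) places p0..p3 in r0..r3 and p4 in the first stack slot.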
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Set up arguments
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(r0, va_arg(parameters, int32_t));
- set_register(r1, va_arg(parameters, int32_t));
- set_register(r2, va_arg(parameters, int32_t));
- set_register(r3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
- CallInternal(entry);
-
- // Pop stack passed arguments.
- CHECK_EQ(entry_stack, get_register(sp));
- set_register(sp, original_stack);
-
- int32_t result = get_register(r0);
- return result;
-}
-
-
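-// Double arguments follow the EABI variant in use: with hardfloat they
-// are passed in d0 and d1, otherwise they are split across the core
-// register pairs r0:r1 and r2:r3.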
-double Simulator::CallFP(byte* entry, double d0, double d1) {
- if (use_eabi_hardfloat()) {
- set_d_register_from_double(0, d0);
- set_d_register_from_double(1, d1);
- } else {
- int buffer[2];
- ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- memcpy(buffer, &d0, sizeof(d0));
- set_dw_register(0, buffer);
- memcpy(buffer, &d1, sizeof(d1));
- set_dw_register(2, buffer);
- }
- CallInternal(entry);
- if (use_eabi_hardfloat()) {
- return get_double_from_d_register(0);
- } else {
- return get_double_from_register_pair(0);
- }
-}
-
-
-uintptr_t Simulator::PushAddress(uintptr_t address) {
- int new_sp = get_register(sp) - sizeof(uintptr_t);
- uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
- *stack_slot = address;
- set_register(sp, new_sp);
- return new_sp;
-}
-
-
-uintptr_t Simulator::PopAddress() {
- int current_sp = get_register(sp);
- uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
- uintptr_t address = *stack_slot;
- set_register(sp, current_sp + sizeof(uintptr_t));
- return address;
-}
-
-} } // namespace v8::internal
-
-#endif // USE_SIMULATOR
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/src/3rdparty/v8/src/arm/simulator-arm.h b/src/3rdparty/v8/src/arm/simulator-arm.h
deleted file mode 100644
index 907a590..0000000
--- a/src/3rdparty/v8/src/arm/simulator-arm.h
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Declares a Simulator for ARM instructions if we are not generating a native
-// ARM binary. This Simulator allows us to run and debug ARM code generation on
-// regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
-// which will start execution in the Simulator or forward to the real entry
-// on an ARM HW platform.
-
-#ifndef V8_ARM_SIMULATOR_ARM_H_
-#define V8_ARM_SIMULATOR_ARM_H_
-
-#include "allocation.h"
-
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native arm platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, int, Address, int, Isolate*);
-
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-// The fifth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<arm_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on arm uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch() { }
-};
-
-} } // namespace v8::internal
-
-#else // !defined(USE_SIMULATOR)
-// Running with a simulator.
-
-#include "constants-arm.h"
-#include "hashmap.h"
-#include "assembler.h"
-
-namespace v8 {
-namespace internal {
-
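-// With kPageShift == 12 and kLineShift == 2, each CachePage shadows a
-// 4KB page of code in 4-byte lines and keeps a 1024-byte validity map
-// (one byte per line).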
-class CachePage {
- public:
- static const int LINE_VALID = 0;
- static const int LINE_INVALID = 1;
-
- static const int kPageShift = 12;
- static const int kPageSize = 1 << kPageShift;
- static const int kPageMask = kPageSize - 1;
- static const int kLineShift = 2; // The cache line is only 4 bytes right now.
- static const int kLineLength = 1 << kLineShift;
- static const int kLineMask = kLineLength - 1;
-
- CachePage() {
- memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
- }
-
- char* ValidityByte(int offset) {
- return &validity_map_[offset >> kLineShift];
- }
-
- char* CachedData(int offset) {
- return &data_[offset];
- }
-
- private:
- char data_[kPageSize]; // The cached data.
- static const int kValidityMapSize = kPageSize >> kLineShift;
- char validity_map_[kValidityMapSize]; // One byte per line.
-};
-
-
-class Simulator {
- public:
- friend class ArmDebugger;
- enum Register {
- no_reg = -1,
- r0 = 0, r1, r2, r3, r4, r5, r6, r7,
- r8, r9, r10, r11, r12, r13, r14, r15,
- num_registers,
- sp = 13,
- lr = 14,
- pc = 15,
- s0 = 0, s1, s2, s3, s4, s5, s6, s7,
- s8, s9, s10, s11, s12, s13, s14, s15,
- s16, s17, s18, s19, s20, s21, s22, s23,
- s24, s25, s26, s27, s28, s29, s30, s31,
- num_s_registers = 32,
- d0 = 0, d1, d2, d3, d4, d5, d6, d7,
- d8, d9, d10, d11, d12, d13, d14, d15,
- d16, d17, d18, d19, d20, d21, d22, d23,
- d24, d25, d26, d27, d28, d29, d30, d31,
- num_d_registers = 32
- };
-
- explicit Simulator(Isolate* isolate);
- ~Simulator();
-
- // The currently executing Simulator instance. Potentially there can be one
- // for each native thread.
- static Simulator* current(v8::internal::Isolate* isolate);
-
- // Accessors for register state. Reading the pc value adheres to the ARM
- // architecture specification and is off by 8 from the currently executing
- // instruction.
- void set_register(int reg, int32_t value);
- int32_t get_register(int reg) const;
- double get_double_from_register_pair(int reg);
- void set_dw_register(int dreg, const int* dbl);
-
- // Support for VFP.
- void set_s_register(int reg, unsigned int value);
- unsigned int get_s_register(int reg) const;
-
- void set_d_register_from_double(int dreg, const double& dbl) {
- SetVFPRegister<double, 2>(dreg, dbl);
- }
-
- double get_double_from_d_register(int dreg) {
- return GetFromVFPRegister<double, 2>(dreg);
- }
-
- void set_s_register_from_float(int sreg, const float flt) {
- SetVFPRegister<float, 1>(sreg, flt);
- }
-
- float get_float_from_s_register(int sreg) {
- return GetFromVFPRegister<float, 1>(sreg);
- }
-
- void set_s_register_from_sinteger(int sreg, const int sint) {
- SetVFPRegister<int, 1>(sreg, sint);
- }
-
- int get_sinteger_from_s_register(int sreg) {
- return GetFromVFPRegister<int, 1>(sreg);
- }
-
- // Special case of set_register and get_register to access the raw PC value.
- void set_pc(int32_t value);
- int32_t get_pc() const;
-
- // Accessor to the internal simulator stack area.
- uintptr_t StackLimit() const;
-
- // Executes ARM instructions until the PC reaches end_sim_pc.
- void Execute();
-
- // Call on program start.
- static void Initialize(Isolate* isolate);
-
- // V8 generally calls into generated JS code with 5 parameters and into
- // generated RegExp code with 7 parameters. This is a convenience function,
- // which sets up the simulator state and grabs the result on return.
- int32_t Call(byte* entry, int argument_count, ...);
- // Alternative: call a 2-argument double function.
- double CallFP(byte* entry, double d0, double d1);
-
- // Push an address onto the JS stack.
- uintptr_t PushAddress(uintptr_t address);
-
- // Pop an address from the JS stack.
- uintptr_t PopAddress();
-
- // Debugger input.
- void set_last_debugger_input(char* input);
- char* last_debugger_input() { return last_debugger_input_; }
-
- // ICache checking.
- static void FlushICache(v8::internal::HashMap* i_cache, void* start,
- size_t size);
-
- // Returns true if pc register contains one of the 'special_values' defined
- // below (bad_lr, end_sim_pc).
- bool has_bad_pc() const;
-
- // EABI variant for double arguments in use.
- bool use_eabi_hardfloat() {
-#if USE_EABI_HARDFLOAT
- return true;
-#else
- return false;
-#endif
- }
-
- private:
- enum special_values {
- // Known bad pc value to ensure that the simulator does not execute
- // without being properly setup.
- bad_lr = -1,
- // A pc value used to signal the simulator to stop execution. Generally
- // the lr is set to this value on transition from native C code to
- // simulated execution, so that the simulator can "return" to the native
- // C code.
- end_sim_pc = -2
- };
-
- // Unsupported instructions use Format to print an error and stop execution.
- void Format(Instruction* instr, const char* format);
-
- // Checks if the current instruction should be executed based on its
- // condition bits.
- bool ConditionallyExecute(Instruction* instr);
-
- // Helper functions to set the conditional flags in the architecture state.
- void SetNZFlags(int32_t val);
- void SetCFlag(bool val);
- void SetVFlag(bool val);
- bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
- bool BorrowFrom(int32_t left, int32_t right);
- bool OverflowFrom(int32_t alu_out,
- int32_t left,
- int32_t right,
- bool addition);
-
- inline int GetCarry() {
- return c_flag_ ? 1 : 0;
- }
-
- // Support for VFP.
- void Compute_FPSCR_Flags(double val1, double val2);
- void Copy_FPSCR_to_APSR();
-
- // Helper functions to decode common "addressing" modes
- int32_t GetShiftRm(Instruction* instr, bool* carry_out);
- int32_t GetImm(Instruction* instr, bool* carry_out);
- void ProcessPUW(Instruction* instr,
- int num_regs,
- int operand_size,
- intptr_t* start_address,
- intptr_t* end_address);
- void HandleRList(Instruction* instr, bool load);
- void HandleVList(Instruction* inst);
- void SoftwareInterrupt(Instruction* instr);
-
- // Stop helper functions.
- inline bool isStopInstruction(Instruction* instr);
- inline bool isWatchedStop(uint32_t bkpt_code);
- inline bool isEnabledStop(uint32_t bkpt_code);
- inline void EnableStop(uint32_t bkpt_code);
- inline void DisableStop(uint32_t bkpt_code);
- inline void IncreaseStopCounter(uint32_t bkpt_code);
- void PrintStopInfo(uint32_t code);
-
- // Read and write memory.
- inline uint8_t ReadBU(int32_t addr);
- inline int8_t ReadB(int32_t addr);
- inline void WriteB(int32_t addr, uint8_t value);
- inline void WriteB(int32_t addr, int8_t value);
-
- inline uint16_t ReadHU(int32_t addr, Instruction* instr);
- inline int16_t ReadH(int32_t addr, Instruction* instr);
- // Note: Overloaded on the sign of the value.
- inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
- inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
-
- inline int ReadW(int32_t addr, Instruction* instr);
- inline void WriteW(int32_t addr, int value, Instruction* instr);
-
- int32_t* ReadDW(int32_t addr);
- void WriteDW(int32_t addr, int32_t value1, int32_t value2);
-
- // Executing is handled based on the instruction type.
- // Both type 0 and type 1 rolled into one.
- void DecodeType01(Instruction* instr);
- void DecodeType2(Instruction* instr);
- void DecodeType3(Instruction* instr);
- void DecodeType4(Instruction* instr);
- void DecodeType5(Instruction* instr);
- void DecodeType6(Instruction* instr);
- void DecodeType7(Instruction* instr);
-
- // Support for VFP.
- void DecodeTypeVFP(Instruction* instr);
- void DecodeType6CoprocessorIns(Instruction* instr);
-
- void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
- void DecodeVCMP(Instruction* instr);
- void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
- void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
-
- // Executes one instruction.
- void InstructionDecode(Instruction* instr);
-
- // ICache.
- static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- int size);
- static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
-
- // Runtime call support.
- static void* RedirectExternalReference(
- void* external_function,
- v8::internal::ExternalReference::Type type);
-
- // For use in calls that take double value arguments.
- void GetFpArgs(double* x, double* y);
- void GetFpArgs(double* x);
- void GetFpArgs(double* x, int32_t* y);
- void SetFpResult(const double& result);
- void TrashCallerSaveRegisters();
-
- template<class ReturnType, int register_size>
- ReturnType GetFromVFPRegister(int reg_index);
-
- template<class InputType, int register_size>
- void SetVFPRegister(int reg_index, const InputType& value);
-
- void CallInternal(byte* entry);
-
- // Architecture state.
- // Saturating instructions require a Q flag to indicate saturation.
- // There is currently no way to read the CPSR directly, and thus read the Q
- // flag, so this is left unimplemented.
- int32_t registers_[16];
- bool n_flag_;
- bool z_flag_;
- bool c_flag_;
- bool v_flag_;
-
- // VFP architecture state.
- unsigned int vfp_registers_[num_d_registers * 2];
- bool n_flag_FPSCR_;
- bool z_flag_FPSCR_;
- bool c_flag_FPSCR_;
- bool v_flag_FPSCR_;
-
- // VFP rounding mode. See ARM DDI 0406B Page A2-29.
- VFPRoundingMode FPSCR_rounding_mode_;
-
- // VFP FP exception flags architecture state.
- bool inv_op_vfp_flag_;
- bool div_zero_vfp_flag_;
- bool overflow_vfp_flag_;
- bool underflow_vfp_flag_;
- bool inexact_vfp_flag_;
-
- // Simulator support.
- char* stack_;
- bool pc_modified_;
- int icount_;
-
- // Debugger input.
- char* last_debugger_input_;
-
- // ICache simulation.
- v8::internal::HashMap* i_cache_;
-
- // Registered breakpoints.
- Instruction* break_pc_;
- Instr break_instr_;
-
- v8::internal::Isolate* isolate_;
-
- // A stop is watched if its code is less than kNumOfWatchedStops.
- // Only watched stops support enabling/disabling and the counter feature.
- static const uint32_t kNumOfWatchedStops = 256;
-
- // Breakpoint is disabled if bit 31 is set.
- static const uint32_t kStopDisabledBit = 1 << 31;
-
- // A stop is enabled, meaning the simulator will stop when meeting the
- // instruction, if bit 31 of watched_stops[code].count is unset.
- // The value watched_stops[code].count & ~(1 << 31) indicates how many times
- // the breakpoint was hit or gone through.
- struct StopCountAndDesc {
- uint32_t count;
- char* desc;
- };
- StopCountAndDesc watched_stops[kNumOfWatchedStops];
-};
-
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
- FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
-
-
-// The simulator has its own stack, and thus a different stack limit from
-// the C-based native code. The passed-in c_limit is ignored; using it to
-// indicate a very small stack would cause spurious stack overflow errors.
-// This is unlikely to be an issue in practice, though it might cause testing
-// trouble down the line.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
- return Simulator::current(isolate)->StackLimit();
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
- return sim->PushAddress(try_catch_address);
- }
-
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
- }
-};
-
-} } // namespace v8::internal
-
-#endif // !defined(USE_SIMULATOR)
-#endif // V8_ARM_SIMULATOR_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/stub-cache-arm.cc b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
deleted file mode 100644
index 03aa359..0000000
--- a/src/3rdparty/v8/src/arm/stub-cache-arm.cc
+++ /dev/null
@@ -1,4091 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
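-// Probes one stub cache table. Each entry is three words (key, value,
-// map), so the unscaled entry number is multiplied by 3 and then scaled
-// by the pointer size: entry 5 lives at base + 5 * 3 * 4 = base + 60
-// bytes.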
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- // Number of the cache entry, not scaled.
- Register offset,
- Register scratch,
- Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
- uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
- uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- ASSERT(value_off_addr > key_off_addr);
- ASSERT((value_off_addr - key_off_addr) % 4 == 0);
- ASSERT((value_off_addr - key_off_addr) < (256 * 4));
- ASSERT(map_off_addr > key_off_addr);
- ASSERT((map_off_addr - key_off_addr) % 4 == 0);
- ASSERT((map_off_addr - key_off_addr) < (256 * 4));
-
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ add(offset_scratch, offset, Operand(offset, LSL, 1));
-
- // Calculate the base address of the entry.
- __ mov(base_addr, Operand(key_offset));
- __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
-
- // Check that the key in the entry matches the name.
- __ ldr(ip, MemOperand(base_addr, 0));
- __ cmp(name, ip);
- __ b(ne, &miss);
-
- // Check the map matches.
- __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
- __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ cmp(ip, scratch2);
- __ b(ne, &miss);
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
- // The mask constant must be encodable as an immediate in the bic insn;
- // this is asserted below.
-
- uint32_t mask = Code::kFlagsNotUsedInLookup;
- ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
- __ bic(flags_reg, flags_reg, Operand(mask));
- // Using cmn and the negative instead of cmp means we can use movw.
- if (flags < 0) {
- __ cmn(flags_reg, Operand(-flags));
- } else {
- __ cmp(flags_reg, Operand(flags));
- }
- __ b(ne, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Miss: fall through.
- __ bind(&miss);
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be internalized and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<String> name,
- Register scratch0,
- Register scratch1) {
- ASSERT(name->IsInternalizedString());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
- Label done;
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- Register map = scratch1;
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
- __ b(ne, miss_label);
-
- // Check that receiver is a JSObject.
- __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(lt, miss_label);
-
- // Load properties array.
- Register properties = scratch0;
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- // Check that the properties array is a dictionary.
- __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
- Register tmp = properties;
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
- __ cmp(map, tmp);
- __ b(ne, miss_label);
-
- // Restore the temporarily used register.
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- receiver,
- properties,
- name,
- scratch1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-
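-// Probes the primary table with a hash derived from the name's hash
-// field and the receiver's map; on a miss, re-hashes (subtracting the
-// name and adding the flags) and probes the secondary table before
-// falling through so the caller can enter the runtime.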
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
-
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 12.
- ASSERT(sizeof(Entry) == 12);
-
- // Make sure the flags do not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
- ASSERT(!extra.is(receiver));
- ASSERT(!extra.is(name));
- ASSERT(!extra.is(scratch));
- ASSERT(!extra2.is(receiver));
- ASSERT(!extra2.is(name));
- ASSERT(!extra2.is(scratch));
- ASSERT(!extra2.is(extra));
-
- // Check scratch, extra and extra2 registers are valid.
- ASSERT(!scratch.is(no_reg));
- ASSERT(!extra.is(no_reg));
- ASSERT(!extra2.is(no_reg));
- ASSERT(!extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
- extra2, extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
- __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ add(scratch, scratch, Operand(ip));
- uint32_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
- // Mask down the eor argument to the minimum to keep the immediate
- // ARM-encodable.
- __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
- // Prefer and_ to ubfx here because ubfx takes 2 cycles.
- __ and_(scratch, scratch, Operand(mask));
-
- // Probe the primary table.
- ProbeTable(isolate,
- masm,
- flags,
- kPrimary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
- uint32_t mask2 = kSecondaryTableSize - 1;
- __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
- __ and_(scratch, scratch, Operand(mask2));
-
- // Probe the secondary table.
- ProbeTable(isolate,
- masm,
- flags,
- kSecondary,
- receiver,
- name,
- scratch,
- extra,
- extra2,
- extra3);
-
- // Cache miss: fall through and let the caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
- extra2, extra3);
-}
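
The probe above reduces to a few lines of integer arithmetic. A sketch of both hash computations follows, assuming the two-bit heap-object tag noted in the comments; the table sizes are illustrative stand-ins rather than the real StubCache constants (the ASSERT only fixes the entry size at 12 bytes, i.e. three 32-bit words):

```cpp
#include <stdint.h>

const uint32_t kPrimarySize = 2048;   // assumed stand-in for kPrimaryTableSize
const uint32_t kSecondarySize = 512;  // assumed stand-in for kSecondaryTableSize
const int kTagSize = 2;               // kHeapObjectTagSize: low bits are 01 for maps

uint32_t PrimaryIndex(uint32_t name_hash, uint32_t map_address, uint32_t flags) {
  uint32_t mask = kPrimarySize - 1;
  uint32_t h = (name_hash + map_address) >> kTagSize;  // tag bits carry no info
  h ^= (flags >> kTagSize) & mask;  // pre-masked so the eor stays ARM-encodable
  return h & mask;
}

uint32_t SecondaryIndex(uint32_t primary, uint32_t name_address, uint32_t flags) {
  uint32_t mask = kSecondarySize - 1;
  uint32_t h = primary - (name_address >> kTagSize);
  h += (flags >> kTagSize) & mask;
  return h & mask;
}
```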
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- // Load the global or builtins object from the current context.
- __ ldr(prototype,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- __ ldr(prototype,
- FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ ldr(prototype,
- FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss) {
- Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ ldr(prototype,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ Move(ip, isolate->global_object());
- __ cmp(prototype, ip);
- __ b(ne, miss);
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
- // Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
- // Load the prototype from the initial map.
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
- // Load a fast property out of a holder object (src). In-object properties
- // are loaded directly; otherwise the property is loaded from the properties
- // fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- Handle<JSObject> holder,
- PropertyIndex index) {
- DoGenerateFastPropertyLoad(
- masm, dst, src, index.is_inobject(holder), index.translate(holder));
-}
-
-
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
- int offset = index * kPointerSize;
- if (!inobject) {
- // Calculate the offset into the properties array.
- offset = offset + FixedArray::kHeaderSize;
- __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- src = dst;
- }
- __ ldr(dst, FieldMemOperand(src, offset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss_label);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, miss_label);
-
- // Load length directly from the JS array.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Ret();
-}
-
-
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
- __ b(ne, non_string_object);
-}
-
-
-// Generate code to load the length from a string object and return the length.
- // If the receiver object is not a string or a wrapped string object, the
- // execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss,
- bool support_wrappers) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
-
- // Load length directly from the string.
- __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
- __ Ret();
-
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
-
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
- }
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(r0, scratch1);
- __ Ret();
-}
-
-
- // Generate StoreField code; the value is passed in the r0 register.
-// When leaving generated code after success, the receiver_reg and name_reg
-// may be clobbered. Upon branch to miss_label, the receiver and name
-// registers have their original values.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- // r0 : value
- Label exit;
-
- LookupResult lookup(masm->isolate());
- object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
- // In sloppy mode, we could just return the value and be done. However, we
- // might be in strict mode, where we have to throw. Since we cannot tell,
- // go into slow case unconditionally.
- __ jmp(miss_label);
- return;
- }
-
- // Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, mode);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
- // Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- if (lookup.IsFound()) {
- holder = lookup.holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- // We need an extra register, so free up name_reg by pushing it.
- __ push(name_reg);
- Label miss_pop, done_check;
- CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, &miss_pop);
- __ jmp(&done_check);
- __ bind(&miss_pop);
- __ pop(name_reg);
- __ jmp(miss_label);
- __ bind(&done_check);
- __ pop(name_reg);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ push(receiver_reg);
- __ mov(r2, Operand(transition));
- __ Push(r2, r0);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3,
- 1);
- return;
- }
-
- if (!transition.is_null()) {
- // Update the map of the object.
- __ mov(scratch1, Operand(transition));
- __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ str(r0, FieldMemOperand(receiver_reg, offset));
-
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(r0, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, r0);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array
- __ ldr(scratch1,
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ str(r0, FieldMemOperand(scratch1, offset));
-
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(r0, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, r0);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- }
-
- // Return the value (register r0).
- __ bind(&exit);
- __ Ret();
-}
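
The field-store addressing in GenerateStoreField comes down to one signed index computation. A sketch assuming 32-bit pointers and an 8-byte FixedArray header (map plus length word); offsets here are untagged, i.e. before FieldMemOperand's -1 adjustment:

```cpp
// Negative adjusted indices land inside the object itself; the rest go
// to the external properties array.
int StoreFieldOffset(int index, int inobject_properties, int instance_size,
                     bool* in_object) {
  index -= inobject_properties;     // in-object slots use up the low indices
  *in_object = index < 0;
  return *in_object
      ? instance_size + index * 4   // kPointerSize == 4 on 32-bit ARM
      : index * 4 + 8;              // FixedArray::kHeaderSize assumed == 8
}
```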
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
- Handle<Code> code = (kind == Code::STORE_IC)
- ? masm->isolate()->builtins()->StoreIC_Miss()
- : masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-static void GenerateCallFunction(MacroAssembler* masm,
- Handle<Object> object,
- const ParameterCount& arguments,
- Label* miss,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // -- r0: receiver
- // -- r1: function to call
- // -----------------------------------
-
- // Check that the function really is a function.
- __ JumpIfSmi(r1, miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Operand(interceptor));
- __ push(scratch);
- __ push(receiver);
- __ push(holder);
- __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(scratch);
- __ mov(scratch, Operand(ExternalReference::isolate_address()));
- __ push(scratch);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ mov(r0, Operand(6));
- __ mov(r1, Operand(ref));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-}
-
-
-static const int kFastApiCallArguments = 4;
-
- // Reserves space for the extra arguments to the API function in the
- // caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
- Register scratch) {
- __ mov(scratch, Operand(Smi::FromInt(0)));
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(scratch);
- }
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
- __ Drop(kFastApiCallArguments);
-}
-
-
-static void GenerateFastApiDirectCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- // ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee JS function
- // -- sp[8] : call data
- // -- sp[12] : isolate
- // -- sp[16] : last JS argument
- // -- ...
- // -- sp[(argc + 3) * 4] : first JS argument
- // -- sp[(argc + 4) * 4] : receiver
- // -----------------------------------
- // Get the function and set up the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(r5, function);
- __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
-
- // Pass the additional arguments.
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ Move(r0, api_call_info);
- __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
- } else {
- __ Move(r6, call_data);
- }
- __ mov(r7, Operand(ExternalReference::isolate_address()));
- // Store JS function, call data and isolate.
- __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
-
- // Prepare arguments.
- __ add(r2, sp, Operand(3 * kPointerSize));
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // r0 = v8::Arguments&
- // Arguments is after the return address.
- __ add(r0, sp, Operand(1 * kPointerSize));
- // v8::Arguments::implicit_args_
- __ str(r2, MemOperand(r0, 0 * kPointerSize));
- // v8::Arguments::values_
- __ add(ip, r2, Operand(argc * kPointerSize));
- __ str(ip, MemOperand(r0, 1 * kPointerSize));
- // v8::Arguments::length_ = argc
- __ mov(ip, Operand(argc));
- __ str(ip, MemOperand(r0, 2 * kPointerSize));
- // v8::Arguments::is_construct_call = 0
- __ mov(ip, Operand::Zero());
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
-
- const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- ApiFunction fun(function_address);
- ExternalReference ref = ExternalReference(&fun,
- ExternalReference::DIRECT_API_CALL,
- masm->isolate());
- AllowExternalCallThatCantCauseGC scope(masm);
-
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
-}
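
The four stores into the exit frame above lay out the block that the C++ side reads as v8::Arguments. A rough sketch of that layout, with field names taken from the comments above rather than the real v8.h declaration:

```cpp
struct ArgumentsLayout {   // mirrors the four exit-frame words written above
  void** implicit_args;    // -> holder / callee / call data / isolate block
  void** values;           // -> last JS argument; earlier arguments sit at
                           //    higher addresses
  int length;              // argc
  int is_construct_call;   // always 0 here: this stub never compiles `new`
};
```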
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- Code::ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
- Counters* counters = masm->isolate()->counters();
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- __ IncrementCounter(counters->call_const_interceptor(), 1,
- scratch1, scratch2);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
- scratch1, scratch2);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has the side effect of fetching a 'holder' for the
- // API signature check (the object which is instanceof the signature's
- // expected type). It is safe to omit it here: if present, it has
- // already been fetched by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
- } else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
- }
-
- // Deferred code for the fast API call case: clean up preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm);
- __ b(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
-
- // Call a runtime function to load the interceptor property.
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- 6);
- // Restore the name_ register.
- __ pop(name_);
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Register scratch,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(holder, name_);
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- }
- // If interceptor returns no-result sentinel, call the constant function.
- __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch);
- __ b(ne, interceptor_succeeded);
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- Code::ExtraICState extra_ic_state_;
-};
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ mov(scratch, Operand(cell));
- __ ldr(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- __ b(ne, miss);
-}
-
-
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-
- // Convert and store the int passed in register ival as an IEEE 754 single
- // precision floating point value at memory location (dst + 4 * wordoffset).
- // If VFP2 is available, use it for the conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ vmov(s0, ival);
- __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
- __ vcvt_f32_s32(s0, s0);
- __ vstr(s0, scratch1, 0);
- } else {
- Label not_special, done;
- // Move the sign bit from source to destination. This works because the
- // IEEE 754 sign bit has the same position (bit 31) and polarity as the
- // 2's complement sign bit of a 32-bit integer.
- ASSERT(kBinary32SignMask == 0x80000000u);
-
- __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
- // Negate value if it is negative.
- __ rsb(ival, ival, Operand::Zero(), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register ival contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(ival, Operand(1));
- __ b(gt, &not_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased).
- static const uint32_t exponent_word_for_1 =
- kBinary32ExponentBias << kBinary32ExponentShift;
-
- __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
- __ b(&done);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- Register zeros = scratch2;
- __ CountLeadingZeros(zeros, ival, scratch1);
-
- // Compute exponent and or it into the exponent register.
- __ rsb(scratch1,
- zeros,
- Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
-
- __ orr(fval,
- fval,
- Operand(scratch1, LSL, kBinary32ExponentShift));
-
- // Shift up the source chopping the top bit off.
- __ add(zeros, zeros, Operand(1));
- // This wouldn't work for 1 and -1 as the shift would be 32, which means 0.
- __ mov(ival, Operand(ival, LSL, zeros));
- // Or in the mantissa (the top 23 bits of the shifted value).
- __ orr(fval,
- fval,
- Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
-
- __ bind(&done);
- __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
- }
-}
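
The non-VFP path is a hand-rolled int-to-binary32 conversion. Below is a portable C++ rendering of the same algorithm, including the 0/±1 special case and the truncating (non-rounding) mantissa; `__builtin_clz` is a GCC/Clang builtin standing in for the CountLeadingZeros macro-instruction. For example, `IntToBinary32Bits(3)` yields 0x40400000, the bit pattern of 3.0f.

```cpp
#include <stdint.h>

uint32_t IntToBinary32Bits(int32_t ival) {
  uint32_t sign = (uint32_t)ival & 0x80000000u;                // kBinary32SignMask
  uint32_t mag = sign ? 0u - (uint32_t)ival : (uint32_t)ival;  // |ival|
  if (mag <= 1) {
    // 0 stays all-zero; 1 and -1 just need the biased zero exponent.
    return mag == 0 ? sign : sign | (127u << 23);  // exponent_word_for_1
  }
  int zeros = __builtin_clz(mag);                    // CountLeadingZeros
  uint32_t exponent = (uint32_t)(31 - zeros) + 127;  // (kBitsPerInt - 1) + bias
  mag <<= zeros + 1;                                 // chop off the implicit 1
  return sign | (exponent << 23) | (mag >> (32 - 23));  // truncated mantissa
}
```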
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void StubCompiler::GenerateTailCall(Handle<Code> code) {
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<String> name,
- int save_at_depth,
- Label* miss,
- PrototypeCheckType check) {
- Handle<JSObject> first = object;
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsInternalizedString()) {
- name = factory()->InternalizeString(name);
- }
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else {
- Register map_reg = scratch1;
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- Handle<Map> current_map(current->map());
- // CheckMap implicitly loads the map of |reg| into |map_reg|.
- __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
- } else {
- __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (heap()->InNewSpace(*prototype)) {
- // The prototype is in new space; we cannot store a reference to it
- // in the code. Load it from the map.
- __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- } else {
- // The prototype is in old space; load it directly.
- __ mov(reg, Operand(prototype));
- }
- }
-
- if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
-
- // Log the check depth.
- LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
- DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
- }
-
- // Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
- // Return the register containing the holder.
- return reg;
-}
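
At run time the compiled sequence amounts to re-validating a precomputed list of maps along the prototype chain (plus the dictionary and access checks called out above). A host-side sketch of the invariant being checked, with illustrative stand-in types rather than V8 handles:

```cpp
#include <vector>

struct Obj { const void* map; const Obj* prototype; };

// True iff every object from receiver up to (but excluding) holder still
// has the map that was baked into the stub at compile time.
bool ChainUnchanged(const Obj* receiver, const Obj* holder,
                    const std::vector<const void*>& expected_maps) {
  std::size_t i = 0;
  for (const Obj* o = receiver; o != holder; o = o->prototype, ++i) {
    if (i >= expected_maps.size() || o->map != expected_maps[i]) return false;
  }
  return true;
}
```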
-
-
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
- Label* miss) {
- if (!miss->is_unused()) {
- __ b(success);
- __ bind(miss);
- GenerateLoadMiss(masm(), kind());
- }
-}
-
-
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* success,
- Handle<ExecutableAccessorInfo> callback) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- ASSERT(!reg.is(scratch2()));
- ASSERT(!reg.is(scratch3()));
- ASSERT(!reg.is(scratch4()));
-
- // Load the properties dictionary.
- Register dictionary = scratch4();
- __ ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- &miss,
- &probe_done,
- dictionary,
- this->name(),
- scratch2(),
- scratch3());
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // pointer into the dictionary. Check that the value is the callback.
- Register pointer = scratch3();
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
- __ cmp(scratch2(), Operand(callback));
- __ b(ne, &miss);
- }
-
- HandlerFrontendFooter(success, &miss);
- return reg;
-}
-
-
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Label* success,
- Handle<GlobalObject> global) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (!global.is_null()) {
- GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
- }
-
- if (!last->HasFastProperties()) {
- __ ldr(scratch2(), FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldr(scratch2(), FieldMemOperand(scratch2(), Map::kPrototypeOffset));
- __ cmp(scratch2(), Operand(isolate()->factory()->null_value()));
- __ b(ne, &miss);
- }
-
- HandlerFrontendFooter(success, &miss);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex index) {
- GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
- __ Ret();
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
- // Return the constant value.
- __ LoadHeapObject(r0, value);
- __ Ret();
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadCallback(
- Register reg,
- Handle<ExecutableAccessorInfo> callback) {
- // Build the AccessorInfo::args_ list on the stack and push the property
- // name below the exit frame so that the GC is aware of them, and store
- // pointers to them.
- __ push(receiver());
- __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
- if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch3(), callback);
- __ ldr(scratch3(), FieldMemOperand(scratch3(),
- ExecutableAccessorInfo::kDataOffset));
- } else {
- __ Move(scratch3(), Handle<Object>(callback->data(),
- callback->GetIsolate()));
- }
- __ Push(reg, scratch3());
- __ mov(scratch3(), Operand(ExternalReference::isolate_address()));
- __ Push(scratch3(), name());
- __ mov(r0, sp); // r0 = Handle<String>
-
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // Create AccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object** args_) as the data.
- __ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
- __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
-
- const int kStackUnwindSpace = 5;
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference ref =
- ExternalReference(&fun,
- ExternalReference::DIRECT_GETTER_CALL,
- masm()->isolate());
- __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
- Register holder_reg,
- Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name) {
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // So far the most popular follow-ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only those; other cases may be added later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- ExecutableAccessorInfo* callback =
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
- compile_followup_inline = callback->getter() != NULL &&
- callback->IsCompatibleReceiver(*object);
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code;
- // the FIELD case might cause a miss during the prototype check.
- bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perform_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
- if (must_preserve_receiver_reg) {
- __ Push(receiver(), holder_reg, this->name());
- } else {
- __ Push(holder_reg, this->name());
- }
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder have been compiled before (see the caller
- // of this method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
- // Check if the interceptor provided a value for the property. If so,
- // return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1());
- __ b(eq, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ Ret();
-
- __ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
- // Leave the internal frame.
- }
-
- GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- PushInterceptorArguments(masm(), receiver(), holder_reg,
- this->name(), interceptor_holder);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
- masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
-}
-
-
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(r2, Operand(name));
- __ b(ne, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(r0, miss);
- CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ mov(r3, Operand(cell));
- __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(r1, miss);
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
- __ b(ne, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ cmp(r4, r3);
- } else {
- __ cmp(r1, Operand(function));
- }
- __ b(ne, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- const int argc = arguments().immediate();
-
- // Get the receiver of the function from the stack into r0.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r0, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
- GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
-
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- Register receiver = r1;
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
- name, &miss);
-
- if (argc == 0) {
- // Nothing to do, just return the length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Drop(argc + 1);
- __ Ret();
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- Register elements = r6;
- Register end_elements = r5;
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedArrayMapRootIndex,
- &check_double,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r0 and calculate new length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r0, r0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(r0, r4);
- __ b(gt, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ JumpIfNotSmi(r4, &with_write_barrier);
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- const int kEndElementsOffset =
- FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
- __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- // Return the new length (in r0).
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&check_double);
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedDoubleArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r0 and calculate new length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r0, r0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(r0, r4);
- __ b(gt, &call_builtin);
-
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(
- r4, r0, elements, r3, r5, r2, r9,
- &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Return the new length (in r0).
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&with_write_barrier);
-
- __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(r3, r7, &not_fast_object);
- __ jmp(&fast_object);
- // In case of fast smi-only elements, convert to fast object; otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(r3, r7, &call_builtin);
-
- __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r7, ip);
- __ b(eq, &call_builtin);
- // r1: receiver
- // r3: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- r3,
- r7,
- &try_holey_map);
- __ mov(r2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- r3,
- r7,
- &call_builtin);
- __ mov(r2, receiver);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(r3, r3, &call_builtin);
- }
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Store the value.
- // We may need a register containing the address end_elements below,
- // so write back the value in end_elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
- __ RecordWrite(elements,
- end_elements,
- r4,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&attempt_to_grow_elements);
- // r0: array's length + 1.
- // r4: elements' length.
-
- if (!FLAG_inline_new) {
- __ b(&call_builtin);
- }
-
- __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(r2, &no_fast_elements_check);
- __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(r7, r7, &call_builtin);
- __ bind(&no_fast_elements_check);
-
- Isolate* isolate = masm()->isolate();
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate);
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate);
-
- const int kAllocationDelta = 4;
- // Load top and check if it is the end of elements.
- __ add(end_elements, elements,
- Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(end_elements, end_elements, Operand(kEndElementsOffset));
- __ mov(r7, Operand(new_space_allocation_top));
- __ ldr(r3, MemOperand(r7));
- __ cmp(end_elements, r3);
- __ b(ne, &call_builtin);
-
- __ mov(r9, Operand(new_space_allocation_limit));
- __ ldr(r9, MemOperand(r9));
- __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
- __ cmp(r3, r9);
- __ b(hi, &call_builtin);
-
- // We fit and could grow elements.
- // Update new_space_allocation_top.
- __ str(r3, MemOperand(r7));
- // Push the argument.
- __ str(r2, MemOperand(end_elements));
- // Fill the rest with holes.
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ str(r3, MemOperand(end_elements, i * kPointerSize));
- }
-
- // Update elements' and array's sizes.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
- __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Elements are in new space, so write barrier is not required.
- __ Drop(argc + 1);
- __ Ret();
- }
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- masm()->isolate()),
- argc + 1,
- 1);
- }
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
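
The attempt_to_grow_elements path above extends the backing store in place only when it abuts the new-space allocation top. A minimal sketch of that test under the same 32-bit assumptions (illustrative signature, not the real heap API):

```cpp
#include <stdint.h>

const int kAllocationDelta = 4;  // words grabbed per inline growth, as above

// Bump-extends the elements backing store if nothing was allocated after
// it and the extra words still fit below the new-space limit.
bool TryGrowInPlace(uint32_t end_of_elements, uint32_t* top, uint32_t limit) {
  if (end_of_elements != *top) return false;       // store is not the newest object
  uint32_t new_top = *top + kAllocationDelta * 4;  // kPointerSize == 4
  if (new_top > limit) return false;               // the b(hi, &call_builtin) case
  *top = new_top;                                  // publish the new allocation top
  return true;
}
```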
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss, return_undefined, call_builtin;
- Register receiver = r1;
- Register elements = r3;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
- r4, r0, name, &miss);
-
- // Get the elements array of the object.
- __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r4 and calculate new length.
- __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ sub(r4, r4, Operand(Smi::FromInt(1)), SetCC);
- __ b(lt, &return_undefined);
-
- // Get the last element.
- __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- // We can't address the last element in one operation. Compute the more
- // expensive shift first, and use an offset later on.
- __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ cmp(r0, r6);
- __ b(eq, &call_builtin);
-
- // Set the array's length.
- __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Fill with the hole.
- __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&return_undefined);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop,
- masm()->isolate()),
- argc + 1,
- 1);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
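
The fast pop compiled above has a small invariant-preserving shape: it never pops a hole, so holey-array semantics stay with the builtin. A sketch with an int sentinel standing in for the hole value (illustrative types, not V8's):

```cpp
#include <vector>

// Returns false where the stub jumps to the builtin; *out receives the
// popped value (or a stand-in for undefined when the array is empty).
bool FastPop(std::vector<int>& elements, int* length, int hole, int* out) {
  if (*length == 0) { *out = 0; return true; }  // return_undefined path
  int value = elements[*length - 1];
  if (value == hole) return false;              // holey slot: call the builtin
  *length -= 1;                                 // set the array's length
  elements[*length] = hole;                     // refill the vacated slot
  *out = value;
  return true;
}
```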
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r1, r3, r4, name, &miss);
-
- Register receiver = r1;
- Register index = r4;
- Register result = r0;
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kNanValueRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in r2.
- __ Move(r2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r0,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r1, r3, r4, name, &miss);
-
- Register receiver = r0;
- Register index = r4;
- Register scratch = r3;
- Register result = r0;
- __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
- if (argc > 0) {
- __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kempty_stringRootIndex);
- __ Drop(argc + 1);
- __ Ret();
- }
-
- __ bind(&miss);
- // Restore function name in r2.
- __ Move(r2, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r1, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = r1;
- __ ldr(code, MemOperand(sp, 0 * kPointerSize));
-
- // Check that the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ and_(code, code, Operand(Smi::FromInt(0xffff)));
-
- StringCharFromCodeGenerator generator(code, r0);
- generator.GenerateFast(masm());
- __ Drop(argc + 1);
- __ Ret();
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
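
The Smi::FromInt(0xffff) mask above works directly on the tagged value: with kSmiTag == 0 and a one-bit tag, a smi is simply the integer shifted left by one, so masking with 0xffff << 1 truncates the payload to uint16 without disturbing the tag bit. A minimal C++ sketch of that encoding, assuming 32-bit words:

    #include <cassert>
    #include <cstdint>

    // Smi encoding on 32-bit ARM: payload << 1, tag bit 0 is always 0.
    inline int32_t SmiFromInt(int32_t v) { return v << 1; }
    inline int32_t SmiToInt(int32_t s)   { return s >> 1; }

    int main() {
      int32_t code = SmiFromInt(0x12345);
      int32_t masked = code & SmiFromInt(0xffff);  // still a valid smi
      assert(SmiToInt(masked) == 0x2345);          // payload truncated to uint16
      return 0;
    }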
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- if (!CpuFeatures::IsSupported(VFP2)) {
- return Handle<Code>::null();
- }
-
- CpuFeatures::Scope scope_vfp2(VFP2);
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss, slow;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into r0.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
-
- // If the argument is a smi, just return.
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
- __ Drop(argc + 1, eq);
- __ Ret(eq);
-
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
-
- Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return;
-
- // With VFP2 enabled, we use the FPU rounding in the RM (round towards
- // minus infinity) mode.
-
- // Load the HeapNumber value.
- // We will need access to the value in the core registers, so we load it
- // with ldrd and move it to the fpu. It also spares a sub instruction for
- // updating the HeapNumber value address, as vldr expects a multiple
- // of 4 offset.
- __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(d1, r4, r5);
-
- // Backup FPSCR.
- __ vmrs(r3);
- // Set custom FPSCR:
- // - Set rounding mode to "Round towards Minus Infinity"
- // (i.e. bits [23:22] = 0b10).
- // - Clear VFP cumulative exception flags (bits [3:0]).
- // - Make sure the Flush-to-zero mode control bit is unset (bit 24).
- __ bic(r9, r3,
- Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
- __ orr(r9, r9, Operand(kRoundToMinusInf));
- __ vmsr(r9);
-
- // Convert the argument to an integer.
- __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
-
- // Use vcvt latency to start checking for special cases.
- // Get the argument exponent and clear the sign bit.
- __ bic(r6, r5, Operand(HeapNumber::kSignMask));
- __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord));
-
- // Retrieve FPSCR and check for vfp exceptions.
- __ vmrs(r9);
- __ tst(r9, Operand(kVFPExceptionMask));
- __ b(&no_vfp_exception, eq);
-
- // Check for NaN, Infinity, and -Infinity.
- // They are invariant through a Math.Floor call, so just
- // return the original argument.
- __ sub(r7, r6, Operand(HeapNumber::kExponentMask
- >> HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ b(&restore_fpscr_and_return, eq);
- // We had an overflow or underflow in the conversion. Check if we
- // have a big exponent.
- __ cmp(r7, Operand(HeapNumber::kMantissaBits));
- // If greater or equal, the argument is already rounded and in r0.
- __ b(&restore_fpscr_and_return, ge);
- __ b(&wont_fit_smi);
-
- __ bind(&no_vfp_exception);
- // Move the result back to general purpose register r0.
- __ vmov(r0, s0);
- // Check if the result fits into a smi.
- __ add(r1, r0, Operand(0x40000000), SetCC);
- __ b(&wont_fit_smi, mi);
- // Tag the result.
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-
- // Check for -0.
- __ cmp(r0, Operand::Zero());
- __ b(&restore_fpscr_and_return, ne);
- // r5 already holds the HeapNumber exponent.
- __ tst(r5, Operand(HeapNumber::kSignMask));
- // If our HeapNumber is negative, it was -0, so load its address and return.
- // Else r0 is loaded with 0, so we can also just return.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne);
-
- __ bind(&restore_fpscr_and_return);
- // Restore FPSCR and return.
- __ vmsr(r3);
- __ Drop(argc + 1);
- __ Ret();
-
- __ bind(&wont_fit_smi);
- // Restore FPSCR and fall through to the slow case.
- __ vmsr(r3);
-
- __ bind(&slow);
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
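
The FPSCR manipulation above touches exactly three fields: the cumulative exception flags, the RMode rounding-mode field, and the FZ flush-to-zero bit. A C++ sketch of the bic/orr pair, assuming the standard ARM FPSCR layout (the mask names mirror the V8 constants; the values here are illustrative):

    #include <cstdint>

    // Assumed ARM FPSCR layout: FZ = bit 24, RMode = bits [23:22],
    // cumulative exception flags = bits [3:0].
    const uint32_t kVFPExceptionMask    = 0xf;
    const uint32_t kVFPRoundingModeMask = 0x3u << 22;
    const uint32_t kVFPFlushToZeroMask  = 0x1u << 24;
    const uint32_t kRoundToMinusInf     = 0x2u << 22;  // RMode = 0b10

    // Mirrors the bic/orr pair: clear the three fields, then select RM.
    inline uint32_t SetRoundToMinusInf(uint32_t fpscr) {
      fpscr &= ~(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask);
      return fpscr | kRoundToMinusInf;
    }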
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : function name
- // -- lr : return address
- // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
- // -- ...
- // -- sp[argc * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
- if (cell.is_null()) {
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into r0.
- __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(r0, &not_smi);
-
- // Do bitwise not or do nothing depending on the sign of the
- // argument.
- __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
-
- // Add 1 or do nothing depending on the sign of the argument.
- __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ b(mi, &slow);
-
- // Smi case done.
- __ Drop(argc + 1);
- __ Ret();
-
- // Check if the argument is a heap number and load its exponent and
- // sign.
- __ bind(&not_smi);
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ tst(r1, Operand(HeapNumber::kSignMask));
- __ b(ne, &negative_sign);
- __ Drop(argc + 1);
- __ Ret();
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ eor(r1, r1, Operand(HeapNumber::kSignMask));
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r0, r4, r5, r6, &slow);
- __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ Drop(argc + 1);
- __ Ret();
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // r2: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
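
The eor/sub pair in the smi path above is the classic branchless absolute value: with m = x >> 31 (arithmetic shift, so m is 0 or -1), abs(x) = (x ^ m) - m. A small C++ sketch, including the one input the stub has to punt on:

    #include <cassert>
    #include <cstdint>

    inline int32_t BranchlessAbs(int32_t x) {
      uint32_t m = static_cast<uint32_t>(x >> 31);  // 0 or 0xffffffff
      // XOR with all-ones is bitwise NOT; subtracting -1 adds 1.
      return static_cast<int32_t>((static_cast<uint32_t>(x) ^ m) - m);
    }

    int main() {
      assert(BranchlessAbs(7) == 7);
      assert(BranchlessAbs(-5) == 5);
      // The most negative value stays negative; the stub detects this with
      // the SetCC/mi check and falls through to the slow case.
      assert(BranchlessAbs(INT32_MIN) == INT32_MIN);
      return 0;
    }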
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- Counters* counters = isolate()->counters();
-
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(r1, &miss_before_stack_reserved);
-
- __ IncrementCounter(counters->call_const(), 1, r0, r3);
- __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
-
- ReserveSpaceForFastApiCall(masm(), r0);
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
- depth, &miss);
-
- GenerateFastApiDirectCall(masm(), optimization, argc);
-
- __ bind(&miss);
- FreeSpaceForFastApiCall(masm());
-
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack
- const int argc = arguments().immediate();
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(r1, &miss);
- }
-
- // Make sure that it's okay not to patch the on-stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(masm()->isolate()->counters()->call_const(),
- 1, r0, r3);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
- name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
- break;
-
- case STRING_CHECK:
- // Check that the object is a string.
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
- break;
-
- case SYMBOL_CHECK:
- // Check that the object is a symbol.
- __ CompareObjectType(r1, r1, r3, SYMBOL_TYPE);
- __ b(ne, &miss);
- break;
-
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(r1, &fast);
- __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
- break;
- }
- case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &fast);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- r0, holder, r3, r1, r4, name, &miss);
- break;
- }
- }
-
- __ b(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(
- function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Handle<JSFunction> function) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label success;
-
- CompileHandlerFrontend(object, holder, name, check, &success);
- __ bind(&success);
- CompileHandlerBackend(function);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ ldr(r1, MemOperand(sp, argc * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
- compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
- &miss);
-
- // Move returned value, the function to call, to r1.
- __ mov(r1, r0);
- // Restore receiver.
- __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-
- GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
- __ str(r3, MemOperand(sp, argc * kPointerSize));
- }
-
- // Set up the context (function already in r1).
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- r1, r2, r3, r4,
- &miss);
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- // Check that the maps haven't changed.
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
- __ push(r1); // receiver
- __ mov(ip, Operand(callback)); // callback info
- __ Push(ip, r2, r0);
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(r0);
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- __ Push(r1, r0);
- ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(r0);
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
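
The push/pop of r0 around the setter call enforces JavaScript assignment semantics: obj.x = v evaluates to v regardless of what the setter returns. A rough C++ analogue of the calling convention, with hypothetical names:

    #include <functional>

    // Invoke a setter but hand the original value back to the caller,
    // mirroring the save/restore of r0 in the stub above.
    template <typename Obj, typename Val>
    Val StoreViaSetter(Obj& receiver,
                       const std::function<Val(Obj&, Val)>& setter,
                       Val value) {
      setter(receiver, value);  // the setter's return value is dropped
      return value;             // the assignment expression yields the RHS
    }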
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(r1, &miss);
- CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss);
-
- GenerateStoreViaSetter(masm(), setter);
-
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(r1, r3, &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ Push(r1, r2, r0); // Receiver, name, value.
-
- __ mov(r0, Operand(Smi::FromInt(strict_mode_)));
- __ push(r0); // strict mode
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
- masm()->isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Handle<Map>(object->map())));
- __ b(ne, &miss);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ mov(r4, Operand(cell));
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
- __ cmp(r5, r6);
- __ b(eq, &miss);
-
- // Store the value in the cell.
- __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
- __ Ret();
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1, r4, r3);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Handle<GlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
-
- __ bind(&success);
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ Ret();
-
- // Return the generated code.
- return GetCode(Code::HANDLER_FRAGMENT, Code::NONEXISTENT, name);
-}
-
-
-Register* LoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { r0, r2, r3, r1, r4, r5 };
- return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { r1, r0, r2, r3, r4, r5 };
- return registers;
-}
-
-
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<String> name,
- Register name_reg,
- Label* miss) {
- __ cmp(name_reg, Operand(name));
- __ b(ne, miss);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- __ push(r0);
- ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> global,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
- bool is_dont_delete) {
- Label success, miss;
-
- __ CheckMap(
- receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
-
- // Get the value from the cell.
- __ mov(r3, Operand(cell));
- __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &miss);
- }
-
- HandlerFrontendFooter(&success, &miss);
- __ bind(&success);
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
- __ mov(r0, r4);
- __ Ret();
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
- Handle<Code> stub = KeyedLoadFastElementStub(
- receiver_map->instance_type() == JS_ARRAY_TYPE,
- elements_kind).GetCode(isolate());
- __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
- } else {
- Handle<Code> stub =
- KeyedLoadDictionaryElementStub().GetCode(isolate());
- __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
- }
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- Handle<String> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
- }
-
- __ JumpIfSmi(receiver(), &miss);
- Register map_reg = scratch1();
-
- int receiver_count = receiver_maps->length();
- __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- __ mov(ip, Operand(receiver_maps->at(current)));
- __ cmp(map_reg, ip);
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
- }
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), kind());
-
- // Return the generated code.
- InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(Code::IC_FRAGMENT, type, name, state);
-}
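
The emitted dispatch is a linear scan over the cached maps: load the receiver's map once, compare it against each candidate, and tail-jump to the matching handler, falling through to the miss stub. A schematic C++ sketch (Map and Handler are hypothetical stand-ins):

    struct Map;                    // stand-in for a v8 map
    typedef void (*Handler)();     // stand-in for a handler code object

    void PolymorphicDispatch(const Map* receiver_map,
                             const Map* const* maps,
                             const Handler* handlers,
                             int count,
                             Handler miss) {
      for (int i = 0; i < count; ++i) {
        if (receiver_map == maps[i]) {
          handlers[i]();           // tail-jump to the cached handler
          return;
        }
      }
      miss();                      // no map matched: generic miss path
    }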
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : name
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
-
- // Check that the name has not changed.
- __ cmp(r1, Operand(name));
- __ b(ne, &miss);
-
- // r3 is used as scratch register. r1 and r2 keep their values if a jump to
- // the miss label is generated.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- r2, r1, r3, r4,
- &miss);
- __ bind(&miss);
-
- __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
- Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_js_array,
- elements_kind,
- grow_mode_).GetCode(isolate());
-
- __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -----------------------------------
- Label miss;
- __ JumpIfSmi(r2, &miss);
-
- int receiver_count = receiver_maps->length();
- __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- __ mov(ip, Operand(receiver_maps->at(i)));
- __ cmp(r3, ip);
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
- } else {
- Label next_map;
- __ b(ne, &next_map);
- __ mov(r3, Operand(transitioned_maps->at(i)));
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- r0 : argc
- // -- r1 : constructor
- // -- lr : return address
- // -- [sp] : last argument
- // -----------------------------------
- Label generic_stub_call;
-
- // Use r7 for holding undefined which is used in several places below.
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are, jump to the generic constructor stub, which calls the actual
- // code for the function, thereby hitting the break points.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(r2, r7);
- __ b(ne, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // r1: constructor function
- // r7: undefined
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &generic_stub_call);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
- __ b(ne, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r7: undefined
- __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
- __ Check(ne, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject in new space.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r7: undefined
- ASSERT(function->has_initial_map());
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-#ifdef DEBUG
- int instance_size = function->initial_map()->instance_size();
- __ cmp(r3, Operand(instance_size >> kPointerSizeLog2));
- __ Check(eq, "Instance size of initial map changed.");
-#endif
- __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject; now initialize the fields. The map is set to the
- // initial map, and properties and elements are set to the empty fixed array.
- // r0: argc
- // r1: constructor function
- // r2: initial map
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r7: undefined
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Calculate the location of the first argument. The stack contains only the
- // argc arguments.
- __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
-
- // Fill all the in-object properties with undefined.
- // r0: argc
- // r1: first argument
- // r3: object size (in words)
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- // r7: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed, next;
- // Check if the argument assigned to the property is actually passed.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ cmp(r0, Operand(arg_number));
- __ b(le, &not_passed);
- // Argument passed - find it on the stack.
- __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize));
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- __ b(&next);
- __ bind(&not_passed);
- // Set the property to undefined.
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- __ bind(&next);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
- isolate());
- __ mov(r2, Operand(constant));
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
- }
-
- // r0: argc
- // r4: JSObject (not tagged)
- // Move argc to r1 and the JSObject to return to r0 and tag it.
- __ mov(r1, r0);
- __ mov(r0, r4);
- __ orr(r0, r0, Operand(kHeapObjectTag));
-
- // r0: JSObject
- // r1: argc
- // Remove caller arguments and receiver from the stack and return.
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
- __ add(sp, sp, Operand(kPointerSize));
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1, r1, r2);
- __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2);
- __ Jump(lr);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
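
The fill loops above implement the this.x = ... fast path: each declared in-object slot receives either the corresponding argument, if it was actually passed, or undefined, and any remaining slots are padded with undefined. A simplified C++ sketch (constant-valued assignments omitted; nullptr stands in for the undefined value):

    // arg_for_slot[i] gives the argument index assigned to slot i.
    void FillInObjectProperties(void** slots, int slot_count,
                                void* const* args, int argc,
                                const int* arg_for_slot, int assigned_count) {
      for (int i = 0; i < assigned_count; ++i) {
        int arg = arg_for_slot[i];
        slots[i] = (arg < argc) ? args[arg] : nullptr;  // passed arg or undefined
      }
      for (int i = assigned_count; i < slot_count; ++i) {
        slots[i] = nullptr;  // pad the unused in-object fields
      }
    }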
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, miss_force_generic;
-
- Register key = r0;
- Register receiver = r1;
-
- __ JumpIfNotSmi(key, &miss_force_generic);
- __ mov(r2, Operand(key, ASR, kSmiTagSize));
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
- __ Ret();
-
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-static bool IsElementTypeSigned(ElementsKind elements_kind) {
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- return true;
-
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- return false;
-
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- return false;
- }
- return false;
-}
-
-
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch0,
- Register scratch1,
- DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
- Label* fail) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- Label key_ok;
- // Check for smi or a smi inside a heap number. We convert the heap
- // number and check if the conversion is exact and fits into the smi
- // range.
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- scratch0,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
- __ sub(ip, key, Operand(kHeapObjectTag));
- __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
- __ EmitVFPTruncate(kRoundToZero,
- scratch0,
- double_scratch0,
- scratch1,
- double_scratch1,
- kCheckForInexactConversion);
- __ b(ne, fail);
- __ TrySmiTag(scratch0, fail, scratch1);
- __ mov(key, scratch0);
- __ bind(&key_ok);
- } else {
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, fail);
- }
-}
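
The VFP2 path accepts a heap-number key only when truncation toward zero is exact and the result fits the 31-bit smi payload; anything else branches to fail. A C++ sketch of that predicate:

    #include <cmath>
    #include <cstdint>

    // A double key is usable only if it is an integral value that
    // survives smi tagging (31-bit signed range, kSmiTag == 0).
    bool KeyToSmi(double key, int32_t* out) {
      double t = std::trunc(key);                  // kRoundToZero
      if (t != key) return false;                  // inexact conversion
      if (t < -1073741824.0 || t > 1073741823.0) return false;
      *out = static_cast<int32_t>(t) * 2;          // tag as smi (payload << 1)
      return true;
    }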
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, check_heap_number, miss_force_generic;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- // r3 mostly holds the elements array or the destination external array.
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check that the index is in range.
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(key, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &miss_force_generic);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // r3: external array.
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Double to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(value, &slow);
- } else {
- __ JumpIfNotSmi(value, &check_heap_number);
- }
- __ SmiUntag(r5, value);
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r5: value (integer).
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- // Clamp the value to [0..255].
- __ Usat(r5, 8, Operand(r5));
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Perform int-to-float conversion and store to memory.
- __ SmiUntag(r4, key);
- StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ add(r3, r3, Operand(key, LSL, 2));
- // r3: effective address of the double element
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP2)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
- FloatingPointHelper::ConvertIntToDouble(
- masm, r5, destination,
- d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent.
- r4, s2); // These are: scratch2, single_scratch.
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatures::Scope scope(VFP2);
- __ vstr(d0, r3, 0);
- } else {
- __ str(r6, MemOperand(r3, 0));
- __ str(r7, MemOperand(r3, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // Entry registers are intact, r0 holds the value which is the return value.
- __ Ret();
-
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- // r3: external array.
- __ bind(&check_heap_number);
- __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // vldr requires the offset to be a multiple of 4, so we cannot
- // fold -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 1));
- __ vcvt_f32_f64(s0, d0);
- __ vstr(s0, r5, 0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 2));
- __ vstr(d0, r5, 0);
- } else {
- // Hoisted load. vldr requires the offset to be a multiple of 4, so we
- // cannot fold -kHeapObjectTag into it.
- __ sub(r5, value, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ EmitECMATruncate(r5, d0, d1, r6, r7, r9);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
- } else {
- // VFP2 is not available, so do manual conversions.
- __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ b(eq, &nan_or_infinity_or_zero);
-
- __ teq(r9, Operand(r7));
- __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
- __ b(eq, &nan_or_infinity_or_zero);
-
- // Rebias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ add(r9,
- r9,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ cmp(r9, Operand(kBinary32MaxExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
- __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
- __ b(gt, &done);
-
- __ cmp(r9, Operand(kBinary32MinExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
- __ b(lt, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
- __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
-
- __ bind(&done);
- __ str(r5, MemOperand(r3, key, LSL, 1));
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r9, r9, r7);
- __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
- __ b(&done);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ add(r7, r3, Operand(key, LSL, 2));
- // r7: effective address of destination element.
- __ str(r6, MemOperand(r7, 0));
- __ str(r5, MemOperand(r7, Register::kSizeInBytes));
- __ Ret();
- } else {
- bool is_signed_type = IsElementTypeSigned(elements_kind);
- int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ mov(r5, Operand::Zero(), LeaveCC, eq);
- __ b(eq, &done);
-
- __ teq(r9, Operand(r7));
- __ mov(r5, Operand::Zero(), LeaveCC, eq);
- __ b(eq, &done);
-
- // Unbias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
- // If the exponent is negative, the result is 0.
- __ mov(r5, Operand::Zero(), LeaveCC, mi);
- __ b(mi, &done);
-
- // If the exponent is too big, the result is the minimal value.
- __ cmp(r9, Operand(meaningful_bits - 1));
- __ mov(r5, Operand(min_value), LeaveCC, ge);
- __ b(ge, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
- __ b(pl, &sign);
-
- __ rsb(r9, r9, Operand::Zero());
- __ mov(r5, Operand(r5, LSL, r9));
- __ rsb(r9, r9, Operand(meaningful_bits));
- __ orr(r5, r5, Operand(r6, LSR, r9));
-
- __ bind(&sign);
- __ teq(r7, Operand::Zero());
- __ rsb(r5, r5, Operand::Zero(), LeaveCC, ne);
-
- __ bind(&done);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
- }
- }
-
- // Slow case: value, key and receiver are still in r0, r1 and r2.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- // Miss case, call the runtime.
- __ bind(&miss_force_generic);
-
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
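
The non-VFP float path is a manual double-to-binary32 conversion: keep the sign, rebias the exponent from 1023 to 127, take the top 23 mantissa bits (truncating), saturate overflow to infinity, and flush zeros, subnormals and underflow to signed zero. A C++ sketch of the same bit surgery, assuming IEEE-754 layouts:

    #include <cstdint>

    uint32_t DoubleBitsToFloatBits(uint64_t d) {
      uint32_t sign = static_cast<uint32_t>(d >> 63) << 31;
      int32_t exp   = static_cast<int32_t>((d >> 52) & 0x7ff);
      uint64_t mant = d & 0xfffffffffffffULL;
      if (exp == 0) return sign;                    // zero/subnormal -> +/-0
      if (exp == 0x7ff)                             // NaN/infinity: keep top bits
        return sign | 0x7f800000u | static_cast<uint32_t>(mant >> 29);
      exp += 127 - 1023;                            // rebias the exponent
      if (exp >= 0xff) return sign | 0x7f800000u;   // overflow -> infinity
      if (exp <= 0) return sign;                    // underflow -> +/-0
      return sign | (static_cast<uint32_t>(exp) << 23)
                  | static_cast<uint32_t>(mant >> 29);
    }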
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch
- // -- r4 : scratch (elements)
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register scratch = r4;
- Register elements_reg = r3;
- Register length_reg = r5;
- Register scratch2 = r6;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(value_reg, &transition_elements_kind);
- }
-
- // Check that the key is within bounds.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- if (is_js_array) {
- __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis.
- __ cmp(key_reg, scratch);
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ b(hs, &grow);
- } else {
- __ b(hs, &miss_force_generic);
- }
-
- // Make sure elements is a fast element array and not a copy-on-write
- // ('cow') array.
- __ CheckMap(elements_reg,
- scratch,
- Heap::kFixedArrayMapRootIndex,
- &miss_force_generic,
- DONT_DO_SMI_CHECK);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(scratch,
- scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value_reg, MemOperand(scratch));
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ add(scratch,
- elements_reg,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ add(scratch,
- scratch,
- Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ str(value_reg, MemOperand(scratch));
- __ mov(receiver_reg, value_reg);
- __ RecordWrite(elements_reg, // Object.
- scratch, // Address.
- receiver_reg, // Value.
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- }
- // value_reg (r0) is preserved.
- // Done.
- __ Ret();
-
- __ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element; anything else
- // must be handled by the runtime. Flags were already set by the previous
- // compare.
- __ b(ne, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ ldr(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
- TAG_OBJECT);
-
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
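- // Assuming the default kPreallocatedArrayElements of 4, the loop below
- // fills indices 1..3 with the hole; index 0 receives the stored value
- // just after it.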
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
- }
-
- // Store the element at index zero.
- __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
-
- // Install the new backing store in the JSArray.
- __ str(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
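- // OMIT_SMI_CHECK is safe above: elements_reg holds the freshly allocated
- // FixedArray and can never be a smi.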
-
- // Increment the length of the array.
- __ mov(length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ Ret();
-
- __ bind(&check_capacity);
- // Copy-on-write elements arrays are not handled by this stub; miss to
- // the force-generic handler if the backing store is COW.
- __ ldr(scratch, FieldMemOperand(elements_reg, HeapObject::kMapOffset));
- __ CompareRoot(scratch, Heap::kFixedCOWArrayMapRootIndex);
- __ b(eq, &miss_force_generic);
-
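- // Make sure the backing store can hold at least one more element.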
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ cmp(length_reg, scratch);
- __ b(hs, &slow);
-
- // Grow the array and finish the store.
- __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -- r3 : scratch (elements backing store)
- // -- r4 : scratch
- // -- r5 : scratch
- // -- r6 : scratch
- // -- r7 : scratch
- // -- r9 : scratch
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label finish_store, check_capacity;
-
- Register value_reg = r0;
- Register key_reg = r1;
- Register receiver_reg = r2;
- Register elements_reg = r3;
- Register scratch1 = r4;
- Register scratch2 = r5;
- Register scratch3 = r6;
- Register scratch4 = r7;
- Register scratch5 = r9;
- Register length_reg = r7;
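- // length_reg deliberately aliases scratch4 (r7); the two are never live
- // at the same time.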
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
-
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- } else {
- __ ldr(scratch1,
- FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- }
- // Compare smis; an unsigned compare catches both negative and
- // out-of-bound indexes.
- __ cmp(key_reg, scratch1);
- // Only JS arrays can grow; &grow is only bound below under this same
- // condition.
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ b(hs, &grow);
- } else {
- __ b(hs, &miss_force_generic);
- }
-
- __ bind(&finish_store);
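- // StoreNumberToDoubleElements stores value_reg at key_reg as an unboxed
- // double: a smi is converted to a double, a heap number has its bits
- // copied, and any other value jumps to &transition_elements_kind.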
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- elements_reg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- &transition_elements_kind);
- __ Ret();
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is growing by only a single element; anything else
- // must be handled by the runtime. The flags are still set from the
- // bounds compare above.
- __ b(ne, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(value_reg, &value_is_smi);
- __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ ldr(length_reg,
- FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
- TAG_OBJECT);
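- // FixedDoubleArray::SizeFor(n) is the array header plus n * kDoubleSize
- // bytes, so with the default kPreallocatedArrayElements of 4 this
- // allocates room for four unboxed doubles.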
-
- // Initialize the new FixedDoubleArray.
- __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
- __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
- __ mov(scratch1,
- Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ str(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-
- __ mov(scratch1, elements_reg);
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- scratch5,
- &transition_elements_kind);
-
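- // The hole in a FixedDoubleArray is a designated NaN bit pattern written
- // as two 32-bit words, lower word first on little-endian ARM. The loop
- // below fills indices 1..kPreallocatedArrayElements-1 with it; index 0
- // was written by StoreNumberToDoubleElements above.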
- __ mov(scratch1, Operand(kHoleNanLower32));
- __ mov(scratch2, Operand(kHoleNanUpper32));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ str(scratch1, FieldMemOperand(elements_reg, offset));
- __ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
- }
-
- // Install the new backing store in the JSArray.
- __ str(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
- scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ Ret();
-
- __ bind(&check_capacity);
- // Make sure that the backing store can hold additional elements.
- __ ldr(scratch1,
- FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ cmp(length_reg, scratch1);
- __ b(hs, &slow);
-
- // Grow the array and finish the store.
- __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
- __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ Jump(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM