Diffstat (limited to 'src/3rdparty/v8/src/ia32')
-rw-r--r--  src/3rdparty/v8/src/ia32/assembler-ia32-inl.h | 503
-rw-r--r--  src/3rdparty/v8/src/ia32/assembler-ia32.cc | 2696
-rw-r--r--  src/3rdparty/v8/src/ia32/assembler-ia32.h | 1281
-rw-r--r--  src/3rdparty/v8/src/ia32/builtins-ia32.cc | 1869
-rw-r--r--  src/3rdparty/v8/src/ia32/code-stubs-ia32.cc | 7936
-rw-r--r--  src/3rdparty/v8/src/ia32/code-stubs-ia32.h | 646
-rw-r--r--  src/3rdparty/v8/src/ia32/codegen-ia32.cc | 967
-rw-r--r--  src/3rdparty/v8/src/ia32/codegen-ia32.h | 107
-rw-r--r--  src/3rdparty/v8/src/ia32/cpu-ia32.cc | 89
-rw-r--r--  src/3rdparty/v8/src/ia32/debug-ia32.cc | 362
-rw-r--r--  src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc | 1184
-rw-r--r--  src/3rdparty/v8/src/ia32/disasm-ia32.cc | 1728
-rw-r--r--  src/3rdparty/v8/src/ia32/frames-ia32.cc | 45
-rw-r--r--  src/3rdparty/v8/src/ia32/frames-ia32.h | 137
-rw-r--r--  src/3rdparty/v8/src/ia32/full-codegen-ia32.cc | 4595
-rw-r--r--  src/3rdparty/v8/src/ia32/ic-ia32.cc | 1675
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc | 6266
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h | 475
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc | 494
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h | 110
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-ia32.cc | 2604
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-ia32.h | 2849
-rw-r--r--  src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc | 3101
-rw-r--r--  src/3rdparty/v8/src/ia32/macro-assembler-ia32.h | 1018
-rw-r--r--  src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc | 1420
-rw-r--r--  src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h | 224
-rw-r--r--  src/3rdparty/v8/src/ia32/simulator-ia32.cc | 30
-rw-r--r--  src/3rdparty/v8/src/ia32/simulator-ia32.h | 74
-rw-r--r--  src/3rdparty/v8/src/ia32/stub-cache-ia32.cc | 3833
29 files changed, 0 insertions, 48318 deletions
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h b/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h
deleted file mode 100644
index 56d88b0..0000000
--- a/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h
+++ /dev/null
@@ -1,503 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-// A light-weight IA32 Assembler.
-
-#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
-#define V8_IA32_ASSEMBLER_IA32_INL_H_
-
-#include "ia32/assembler-ia32.h"
-
-#include "cpu.h"
-#include "debug.h"
-
-namespace v8 {
-namespace internal {
-
-
-static const byte kCallOpcode = 0xE8;
-
-
-// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta) {
- if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- } else if (rmode_ == CODE_AGE_SEQUENCE) {
- if (*pc_ == kCallOpcode) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- }
- } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
- // Special handling of js_return when a break point is set (call
- // instruction has been inserted).
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
- // Special handling of a debug break slot when a break point is set (call
- // instruction has been inserted).
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- } else if (IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- int32_t* p = reinterpret_cast<int32_t*>(pc_);
- *p += delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- }
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(pc_);
-}
-
-
-int RelocInfo::target_address_size() {
- return Assembler::kSpecialTargetSize;
-}
-
-
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- Assembler::set_target_address_at(pc_, target);
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_at(pc_);
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_Handle_at(pc_);
-}
-
-
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return &Memory::Object_at(pc_);
-}
-
-
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory::Object_at(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER &&
- host() != NULL &&
- target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
- }
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
-}
-
-
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
-}
-
-
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
-}
-
-
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
- Memory::Address_at(pc_) = address;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
- }
-}
-
-
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- ASSERT(*pc_ == kCallOpcode);
- return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(*pc_ == kCallOpcode);
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
-}
-
-
-Address RelocInfo::call_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Assembler::target_address_at(pc_ + 1);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Assembler::set_target_address_at(pc_ + 1, target);
- if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(pc_ + 1);
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- return *pc_ == kCallOpcode;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- return !Assembler::IsNop(pc());
-}
-
-
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
- #ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
- visitor->VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- visitor->VisitRuntimeEntry(this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(heap, this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-
-Immediate::Immediate(int x) {
- x_ = x;
- rmode_ = RelocInfo::NONE32;
-}
-
-
-Immediate::Immediate(const ExternalReference& ext) {
- x_ = reinterpret_cast<int32_t>(ext.address());
- rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Immediate::Immediate(Label* internal_offset) {
- x_ = reinterpret_cast<int32_t>(internal_offset);
- rmode_ = RelocInfo::INTERNAL_REFERENCE;
-}
-
-
-Immediate::Immediate(Handle<Object> handle) {
- // Verify all Objects referred to by code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
- if (obj->IsHeapObject()) {
- x_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // no relocation needed
- x_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE32;
- }
-}
-
-
-Immediate::Immediate(Smi* value) {
- x_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-Immediate::Immediate(Address addr) {
- x_ = reinterpret_cast<int32_t>(addr);
- rmode_ = RelocInfo::NONE32;
-}
-
-
-void Assembler::emit(uint32_t x) {
- *reinterpret_cast<uint32_t*>(pc_) = x;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::emit(Handle<Object> handle) {
- // Verify all Objects referred to by code are NOT in new space.
- Object* obj = *handle;
- ASSERT(!isolate()->heap()->InNewSpace(obj));
- if (obj->IsHeapObject()) {
- emit(reinterpret_cast<intptr_t>(handle.location()),
- RelocInfo::EMBEDDED_OBJECT);
- } else {
- // no relocation needed
- emit(reinterpret_cast<intptr_t>(obj));
- }
-}
-
-
-void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
- if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
- RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
- } else if (!RelocInfo::IsNone(rmode)) {
- RecordRelocInfo(rmode);
- }
- emit(x);
-}
-
-
-void Assembler::emit(const Immediate& x) {
- if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
- Label* label = reinterpret_cast<Label*>(x.x_);
- emit_code_relative_offset(label);
- return;
- }
- if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
- emit(x.x_);
-}
-
-
-void Assembler::emit_code_relative_offset(Label* label) {
- if (label->is_bound()) {
- int32_t pos;
- pos = label->pos() + Code::kHeaderSize - kHeapObjectTag;
- emit(pos);
- } else {
- emit_disp(label, Displacement::CODE_RELATIVE);
- }
-}
-
-
-void Assembler::emit_w(const Immediate& x) {
- ASSERT(RelocInfo::IsNone(x.rmode_));
- uint16_t value = static_cast<uint16_t>(x.x_);
- reinterpret_cast<uint16_t*>(pc_)[0] = value;
- pc_ += sizeof(uint16_t);
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- return pc + sizeof(int32_t) + *reinterpret_cast<int32_t*>(pc);
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- int32_t* p = reinterpret_cast<int32_t*>(pc);
- *p = target - (pc + sizeof(int32_t));
- CPU::FlushICache(p, sizeof(int32_t));
-}
-
-
-Address Assembler::target_address_from_return_address(Address pc) {
- return pc - kCallTargetAddressOffset;
-}
-
-
-Displacement Assembler::disp_at(Label* L) {
- return Displacement(long_at(L->pos()));
-}
-
-
-void Assembler::disp_at_put(Label* L, Displacement disp) {
- long_at_put(L->pos(), disp.data());
-}
-
-
-void Assembler::emit_disp(Label* L, Displacement::Type type) {
- Displacement disp(L, type);
- L->link_to(pc_offset());
- emit(static_cast<int>(disp.data()));
-}
-
-
-void Assembler::emit_near_disp(Label* L) {
- byte disp = 0x00;
- if (L->is_near_linked()) {
- int offset = L->near_link_pos() - pc_offset();
- ASSERT(is_int8(offset));
- disp = static_cast<byte>(offset & 0xFF);
- }
- L->link_to(pc_offset(), Label::kNear);
- *pc_++ = disp;
-}
-
-
-void Operand::set_modrm(int mod, Register rm) {
- ASSERT((mod & -4) == 0);
- buf_[0] = mod << 6 | rm.code();
- len_ = 1;
-}
-
-
-void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
- ASSERT(len_ == 1);
- ASSERT((scale & -4) == 0);
- // Use SIB with no index register only for base esp.
- ASSERT(!index.is(esp) || base.is(esp));
- buf_[1] = scale << 6 | index.code() << 3 | base.code();
- len_ = 2;
-}
-
-
-void Operand::set_disp8(int8_t disp) {
- ASSERT(len_ == 1 || len_ == 2);
- *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
-}
-
-
-void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
- ASSERT(len_ == 1 || len_ == 2);
- int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int32_t);
- rmode_ = rmode;
-}
-
-Operand::Operand(Register reg) {
- // reg
- set_modrm(3, reg);
-}
-
-
-Operand::Operand(XMMRegister xmm_reg) {
- Register reg = { xmm_reg.code() };
- set_modrm(3, reg);
-}
-
-
-Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
- // [disp/r]
- set_modrm(0, ebp);
- set_dispr(disp, rmode);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_ASSEMBLER_IA32_INL_H_
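
The header above encodes every call and jump target as a pc-relative rel32 operand: target_address_at() computes pc + sizeof(int32_t) + disp, and set_target_address_at() stores target - (pc + sizeof(int32_t)). The standalone C++ sketch below illustrates only that arithmetic; it is not V8 code, the ReadTarget/WriteTarget names are made up for the example, and std::memcpy stands in for V8's raw int32_t* cast and icache flush.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    typedef uint8_t* Address;

    // Read a target: 'pc' points at the rel32 operand itself, and the
    // displacement is relative to the end of that 4-byte operand.
    Address ReadTarget(Address pc) {
      int32_t disp;
      std::memcpy(&disp, pc, sizeof(disp));
      return pc + sizeof(int32_t) + disp;
    }

    // Patch a target in place (V8 additionally flushes the icache).
    void WriteTarget(Address pc, Address target) {
      int32_t disp = static_cast<int32_t>(target - (pc + sizeof(int32_t)));
      std::memcpy(pc, &disp, sizeof(disp));
    }

    int main() {
      uint8_t code[16] = {0};
      code[0] = 0xE8;                    // call rel32 opcode
      WriteTarget(code + 1, code + 11);  // pretend code+11 is the callee
      assert(ReadTarget(code + 1) == code + 11);
      return 0;
    }

Because the operand is pc-relative, moving a code object by some delta only requires subtracting delta from each such entry, which is what RelocInfo::apply() above does for the modes in kApplyMask.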
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32.cc b/src/3rdparty/v8/src/ia32/assembler-ia32.cc
deleted file mode 100644
index 123383c..0000000
--- a/src/3rdparty/v8/src/ia32/assembler-ia32.cc
+++ /dev/null
@@ -1,2696 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license has been modified
-// significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "disassembler.h"
-#include "macro-assembler.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Implementation of CpuFeatures
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-uint64_t CpuFeatures::supported_ = 0;
-uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
-
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-
-int IntelDoubleRegister::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(SSE2)) {
- return XMMRegister::kNumAllocatableRegisters;
- } else {
- return X87TopOfStackRegister::kNumAllocatableRegisters;
- }
-}
-
-
-int IntelDoubleRegister::NumRegisters() {
- if (CpuFeatures::IsSupported(SSE2)) {
- return XMMRegister::kNumRegisters;
- } else {
- return X87TopOfStackRegister::kNumRegisters;
- }
-}
-
-
-const char* IntelDoubleRegister::AllocationIndexToString(int index) {
- if (CpuFeatures::IsSupported(SSE2)) {
- return XMMRegister::AllocationIndexToString(index);
- } else {
- return X87TopOfStackRegister::AllocationIndexToString(index);
- }
-}
-
-
-// The Probe method needs executable memory, so it commits a VirtualMemory
-// buffer to hold the generated probe code. Allocation failure is silent
-// and leads to a safe default (no optional CPU features detected).
-void CpuFeatures::Probe() {
- ASSERT(!initialized_);
- ASSERT(supported_ == 0);
-#ifdef DEBUG
- initialized_ = true;
-#endif
- if (Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- return; // No features if we might serialize.
- }
-
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
- }
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
- }
-
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old esp, since we are going to modify the stack.
- __ push(ebp);
- __ pushfd();
- __ push(ecx);
- __ push(ebx);
- __ mov(ebp, esp);
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfd();
- __ pop(eax);
- __ mov(edx, eax);
- __ xor_(eax, 0x200000); // Flip bit 21.
- __ push(eax);
- __ popfd();
- __ pushfd();
- __ pop(eax);
- __ xor_(eax, edx); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in edx:eax.
- __ xor_(eax, eax);
- __ xor_(edx, edx);
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ mov(eax, 1);
- supported_ = (1 << CPUID);
- { Scope fscope(CPUID);
- __ cpuid();
- }
- supported_ = 0;
-
- // Move the result from ecx:edx to edx:eax and make sure to mark the
- // CPUID feature as supported.
- __ mov(eax, edx);
- __ or_(eax, 1 << CPUID);
- __ mov(edx, ecx);
-
- // Done.
- __ bind(&done);
- __ mov(esp, ebp);
- __ pop(ebx);
- __ pop(ecx);
- __ popfd();
- __ pop(ebp);
- __ ret(0);
-#undef __
-
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
- supported_ = probe();
- found_by_runtime_probing_ = supported_;
- uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
- supported_ |= os_guarantees;
- found_by_runtime_probing_ &= ~os_guarantees;
-
- delete memory;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Displacement
-
-void Displacement::init(Label* L, Type type) {
- ASSERT(!L->is_bound());
- int next = 0;
- if (L->is_linked()) {
- next = L->pos();
- ASSERT(next > 0); // Displacements must be at positions > 0
- }
- // Ensure that we _never_ overflow the next field.
- ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
- data_ = NextField::encode(next) | TypeField::encode(type);
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-
-const int RelocInfo::kApplyMask =
- RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
- 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on IA32 means that it is a relative address, as used by
- // branch instructions. These are also the ones that need changing when a
- // code object moves.
- return (1 << rmode_) & kApplyMask;
-}
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- for (int i = 0; i < instruction_count; i++) {
- *(pc_ + i) = *(instructions + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Call instruction takes up 5 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 5;
- int code_size = kCallCodeSize + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc_, code_size);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->call(target, RelocInfo::NONE32);
-
- // Check that the size of the code generated is as expected.
- ASSERT_EQ(kCallCodeSize,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- ASSERT_GE(guard_bytes, 0);
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand
-
-Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
- // [base + disp/r]
- if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
- // [base]
- set_modrm(0, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
- } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
- // [base + disp8]
- set_modrm(1, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
- set_disp8(disp);
- } else {
- // [base + disp/r]
- set_modrm(2, base);
- if (base.is(esp)) set_sib(times_1, esp, base);
- set_dispr(disp, rmode);
- }
-}
-
-
-Operand::Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode) {
- ASSERT(!index.is(esp)); // illegal addressing mode
- // [base + index*scale + disp/r]
- if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
- // [base + index*scale]
- set_modrm(0, esp);
- set_sib(scale, index, base);
- } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
- // [base + index*scale + disp8]
- set_modrm(1, esp);
- set_sib(scale, index, base);
- set_disp8(disp);
- } else {
- // [base + index*scale + disp/r]
- set_modrm(2, esp);
- set_sib(scale, index, base);
- set_dispr(disp, rmode);
- }
-}
-
-
-Operand::Operand(Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode) {
- ASSERT(!index.is(esp)); // illegal addressing mode
- // [index*scale + disp/r]
- set_modrm(0, esp);
- set_sib(scale, index, ebp);
- set_dispr(disp, rmode);
-}
-
-
-bool Operand::is_reg(Register reg) const {
- return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
- && ((buf_[0] & 0x07) == reg.code()); // register codes match.
-}
-
-
-bool Operand::is_reg_only() const {
- return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
-}
-
-
-Register Operand::reg() const {
- ASSERT(is_reg_only());
- return Register::from_code(buf_[0] & 0x07);
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-// Emit a single byte. Must always be inlined.
-#define EMIT(x) \
- *pc_++ = (x)
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static void InitCoverageLog();
-#endif
-
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
- positions_recorder_(this) {
- // Clear the buffer in debug mode unless it was provided by the
- // caller in which case we can't be sure it's okay to overwrite
- // existing code in it; see CodePatcher::CodePatcher(...).
-#ifdef DEBUG
- if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size_); // int3
- }
-#endif
-
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-
-#ifdef GENERATED_CODE_COVERAGE
- InitCoverageLog();
-#endif
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Finalize code (at this point overflow() may be true, but the gap ensures
- // that we are still not overlapping instructions and relocation info).
- ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
- desc->origin = this;
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(IsPowerOf2(m));
- int mask = m - 1;
- int addr = pc_offset();
- Nop((m - (addr & mask)) & mask);
-}
-
-
-bool Assembler::IsNop(Address addr) {
- Address a = addr;
- while (*a == 0x66) a++;
- if (*a == 0x90) return true;
- if (a[0] == 0xf && a[1] == 0x1f) return true;
- return false;
-}
-
-
-void Assembler::Nop(int bytes) {
- EnsureSpace ensure_space(this);
-
- if (!CpuFeatures::IsSupported(SSE2)) {
- // Older CPUs that do not support SSE2 may not support multibyte NOP
- // instructions.
- for (; bytes > 0; bytes--) {
- EMIT(0x90);
- }
- return;
- }
-
- // Multi byte nops from http://support.amd.com/us/Processor_TechDocs/40546.pdf
- while (bytes > 0) {
- switch (bytes) {
- case 2:
- EMIT(0x66);
- case 1:
- EMIT(0x90);
- return;
- case 3:
- EMIT(0xf);
- EMIT(0x1f);
- EMIT(0);
- return;
- case 4:
- EMIT(0xf);
- EMIT(0x1f);
- EMIT(0x40);
- EMIT(0);
- return;
- case 6:
- EMIT(0x66);
- case 5:
- EMIT(0xf);
- EMIT(0x1f);
- EMIT(0x44);
- EMIT(0);
- EMIT(0);
- return;
- case 7:
- EMIT(0xf);
- EMIT(0x1f);
- EMIT(0x80);
- EMIT(0);
- EMIT(0);
- EMIT(0);
- EMIT(0);
- return;
- default:
- case 11:
- EMIT(0x66);
- bytes--;
- case 10:
- EMIT(0x66);
- bytes--;
- case 9:
- EMIT(0x66);
- bytes--;
- case 8:
- EMIT(0xf);
- EMIT(0x1f);
- EMIT(0x84);
- EMIT(0);
- EMIT(0);
- EMIT(0);
- EMIT(0);
- EMIT(0);
- bytes -= 8;
- }
- }
-}
-
-
-void Assembler::CodeTargetAlign() {
- Align(16); // Preferred alignment of jump targets on ia32.
-}
-
-
-void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CPUID));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xA2);
-}
-
-
-void Assembler::pushad() {
- EnsureSpace ensure_space(this);
- EMIT(0x60);
-}
-
-
-void Assembler::popad() {
- EnsureSpace ensure_space(this);
- EMIT(0x61);
-}
-
-
-void Assembler::pushfd() {
- EnsureSpace ensure_space(this);
- EMIT(0x9C);
-}
-
-
-void Assembler::popfd() {
- EnsureSpace ensure_space(this);
- EMIT(0x9D);
-}
-
-
-void Assembler::push(const Immediate& x) {
- EnsureSpace ensure_space(this);
- if (x.is_int8()) {
- EMIT(0x6a);
- EMIT(x.x_);
- } else {
- EMIT(0x68);
- emit(x);
- }
-}
-
-
-void Assembler::push_imm32(int32_t imm32) {
- EnsureSpace ensure_space(this);
- EMIT(0x68);
- emit(imm32);
-}
-
-
-void Assembler::push(Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x50 | src.code());
-}
-
-
-void Assembler::push(const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(esi, src);
-}
-
-
-void Assembler::pop(Register dst) {
- ASSERT(reloc_info_writer.last_pc() != NULL);
- EnsureSpace ensure_space(this);
- EMIT(0x58 | dst.code());
-}
-
-
-void Assembler::pop(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0x8F);
- emit_operand(eax, dst);
-}
-
-
-void Assembler::enter(const Immediate& size) {
- EnsureSpace ensure_space(this);
- EMIT(0xC8);
- emit_w(size);
- EMIT(0);
-}
-
-
-void Assembler::leave() {
- EnsureSpace ensure_space(this);
- EMIT(0xC9);
-}
-
-
-void Assembler::mov_b(Register dst, const Operand& src) {
- CHECK(dst.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x8A);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mov_b(const Operand& dst, int8_t imm8) {
- EnsureSpace ensure_space(this);
- EMIT(0xC6);
- emit_operand(eax, dst);
- EMIT(imm8);
-}
-
-
-void Assembler::mov_b(const Operand& dst, Register src) {
- CHECK(src.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x88);
- emit_operand(src, dst);
-}
-
-
-void Assembler::mov_w(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mov_w(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::mov(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- EMIT(0xB8 | dst.code());
- emit(imm32);
-}
-
-
-void Assembler::mov(Register dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- EMIT(0xB8 | dst.code());
- emit(x);
-}
-
-
-void Assembler::mov(Register dst, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- EMIT(0xB8 | dst.code());
- emit(handle);
-}
-
-
-void Assembler::mov(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mov(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x89);
- EMIT(0xC0 | src.code() << 3 | dst.code());
-}
-
-
-void Assembler::mov(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- EMIT(0xC7);
- emit_operand(eax, dst);
- emit(x);
-}
-
-
-void Assembler::mov(const Operand& dst, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- EMIT(0xC7);
- emit_operand(eax, dst);
- emit(handle);
-}
-
-
-void Assembler::mov(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movsx_b(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xBE);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movsx_w(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xBF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzx_b(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzx_w(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CMOV));
- EnsureSpace ensure_space(this);
- // Opcode: 0f 40 + cc /r.
- EMIT(0x0F);
- EMIT(0x40 + cc);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cld() {
- EnsureSpace ensure_space(this);
- EMIT(0xFC);
-}
-
-
-void Assembler::rep_movs() {
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0xA5);
-}
-
-
-void Assembler::rep_stos() {
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0xAB);
-}
-
-
-void Assembler::stos() {
- EnsureSpace ensure_space(this);
- EMIT(0xAB);
-}
-
-
-void Assembler::xchg(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.is(eax) || dst.is(eax)) { // Single-byte encoding.
- EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
- } else {
- EMIT(0x87);
- EMIT(0xC0 | src.code() << 3 | dst.code());
- }
-}
-
-
-void Assembler::adc(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit_arith(2, Operand(dst), Immediate(imm32));
-}
-
-
-void Assembler::adc(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x13);
- emit_operand(dst, src);
-}
-
-
-void Assembler::add(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x03);
- emit_operand(dst, src);
-}
-
-
-void Assembler::add(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x01);
- emit_operand(src, dst);
-}
-
-
-void Assembler::add(const Operand& dst, const Immediate& x) {
- ASSERT(reloc_info_writer.last_pc() != NULL);
- EnsureSpace ensure_space(this);
- emit_arith(0, dst, x);
-}
-
-
-void Assembler::and_(Register dst, int32_t imm32) {
- and_(dst, Immediate(imm32));
-}
-
-
-void Assembler::and_(Register dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(4, Operand(dst), x);
-}
-
-
-void Assembler::and_(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x23);
- emit_operand(dst, src);
-}
-
-
-void Assembler::and_(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(4, dst, x);
-}
-
-
-void Assembler::and_(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x21);
- emit_operand(src, dst);
-}
-
-
-void Assembler::cmpb(const Operand& op, int8_t imm8) {
- EnsureSpace ensure_space(this);
- if (op.is_reg(eax)) {
- EMIT(0x3C);
- } else {
- EMIT(0x80);
- emit_operand(edi, op); // edi == 7
- }
- EMIT(imm8);
-}
-
-
-void Assembler::cmpb(const Operand& op, Register reg) {
- CHECK(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x38);
- emit_operand(reg, op);
-}
-
-
-void Assembler::cmpb(Register reg, const Operand& op) {
- CHECK(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x3A);
- emit_operand(reg, op);
-}
-
-
-void Assembler::cmpw(const Operand& op, Immediate imm16) {
- ASSERT(imm16.is_int16());
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x81);
- emit_operand(edi, op);
- emit_w(imm16);
-}
-
-
-void Assembler::cmp(Register reg, int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit_arith(7, Operand(reg), Immediate(imm32));
-}
-
-
-void Assembler::cmp(Register reg, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- emit_arith(7, Operand(reg), Immediate(handle));
-}
-
-
-void Assembler::cmp(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x3B);
- emit_operand(reg, op);
-}
-
-
-void Assembler::cmp(const Operand& op, const Immediate& imm) {
- EnsureSpace ensure_space(this);
- emit_arith(7, op, imm);
-}
-
-
-void Assembler::cmp(const Operand& op, Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- emit_arith(7, op, Immediate(handle));
-}
-
-
-void Assembler::cmpb_al(const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x38); // CMP r/m8, r8
- emit_operand(eax, op); // eax has same code as register al.
-}
-
-
-void Assembler::cmpw_ax(const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x39); // CMP r/m16, r16
- emit_operand(eax, op); // eax has same code as register ax.
-}
-
-
-void Assembler::dec_b(Register dst) {
- CHECK(dst.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0xFE);
- EMIT(0xC8 | dst.code());
-}
-
-
-void Assembler::dec_b(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xFE);
- emit_operand(ecx, dst);
-}
-
-
-void Assembler::dec(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0x48 | dst.code());
-}
-
-
-void Assembler::dec(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(ecx, dst);
-}
-
-
-void Assembler::cdq() {
- EnsureSpace ensure_space(this);
- EMIT(0x99);
-}
-
-
-void Assembler::idiv(Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xF8 | src.code());
-}
-
-
-void Assembler::imul(Register reg) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xE8 | reg.code());
-}
-
-
-void Assembler::imul(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imul(Register dst, Register src, int32_t imm32) {
- EnsureSpace ensure_space(this);
- if (is_int8(imm32)) {
- EMIT(0x6B);
- EMIT(0xC0 | dst.code() << 3 | src.code());
- EMIT(imm32);
- } else {
- EMIT(0x69);
- EMIT(0xC0 | dst.code() << 3 | src.code());
- emit(imm32);
- }
-}
-
-
-void Assembler::inc(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0x40 | dst.code());
-}
-
-
-void Assembler::inc(const Operand& dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(eax, dst);
-}
-
-
-void Assembler::lea(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::mul(Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xE0 | src.code());
-}
-
-
-void Assembler::neg(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xD8 | dst.code());
-}
-
-
-void Assembler::not_(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- EMIT(0xD0 | dst.code());
-}
-
-
-void Assembler::or_(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit_arith(1, Operand(dst), Immediate(imm32));
-}
-
-
-void Assembler::or_(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::or_(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(1, dst, x);
-}
-
-
-void Assembler::or_(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x09);
- emit_operand(src, dst);
-}
-
-
-void Assembler::rcl(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xD0 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xD0 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::rcr(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xD8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xD8 | dst.code());
- EMIT(imm8);
- }
-}
-
-void Assembler::ror(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xC8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xC8 | dst.code());
- EMIT(imm8);
- }
-}
-
-void Assembler::ror_cl(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- EMIT(0xC8 | dst.code());
-}
-
-
-void Assembler::sar(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xF8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xF8 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::sar_cl(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- EMIT(0xF8 | dst.code());
-}
-
-
-void Assembler::sbb(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x1B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::shld(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xA5);
- emit_operand(dst, src);
-}
-
-
-void Assembler::shl(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xE0 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xE0 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::shl_cl(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- EMIT(0xE0 | dst.code());
-}
-
-
-void Assembler::shrd(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xAD);
- emit_operand(dst, src);
-}
-
-
-void Assembler::shr(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xE8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xE8 | dst.code());
- EMIT(imm8);
- }
-}
-
-
-void Assembler::shr_cl(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- EMIT(0xE8 | dst.code());
-}
-
-
-void Assembler::sub(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(5, dst, x);
-}
-
-
-void Assembler::sub(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x2B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::sub(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x29);
- emit_operand(src, dst);
-}
-
-
-void Assembler::test(Register reg, const Immediate& imm) {
- EnsureSpace ensure_space(this);
- // Only use test against byte for registers that have a byte
- // variant: eax, ebx, ecx, and edx.
- if (RelocInfo::IsNone(imm.rmode_) &&
- is_uint8(imm.x_) &&
- reg.is_byte_register()) {
- uint8_t imm8 = imm.x_;
- if (reg.is(eax)) {
- EMIT(0xA8);
- EMIT(imm8);
- } else {
- emit_arith_b(0xF6, 0xC0, reg, imm8);
- }
- } else {
- // This is not using emit_arith because test doesn't support
- // sign-extension of 8-bit operands.
- if (reg.is(eax)) {
- EMIT(0xA9);
- } else {
- EMIT(0xF7);
- EMIT(0xC0 | reg.code());
- }
- emit(imm);
- }
-}
-
-
-void Assembler::test(Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- EMIT(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::test_b(Register reg, const Operand& op) {
- CHECK(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x84);
- emit_operand(reg, op);
-}
-
-
-void Assembler::test(const Operand& op, const Immediate& imm) {
- EnsureSpace ensure_space(this);
- EMIT(0xF7);
- emit_operand(eax, op);
- emit(imm);
-}
-
-
-void Assembler::test_b(const Operand& op, uint8_t imm8) {
- if (op.is_reg_only() && !op.reg().is_byte_register()) {
- test(op, Immediate(imm8));
- return;
- }
- EnsureSpace ensure_space(this);
- EMIT(0xF6);
- emit_operand(eax, op);
- EMIT(imm8);
-}
-
-
-void Assembler::xor_(Register dst, int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit_arith(6, Operand(dst), Immediate(imm32));
-}
-
-
-void Assembler::xor_(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- EMIT(0x33);
- emit_operand(dst, src);
-}
-
-
-void Assembler::xor_(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x31);
- emit_operand(src, dst);
-}
-
-
-void Assembler::xor_(const Operand& dst, const Immediate& x) {
- EnsureSpace ensure_space(this);
- emit_arith(6, dst, x);
-}
-
-
-void Assembler::bt(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xA3);
- emit_operand(src, dst);
-}
-
-
-void Assembler::bts(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0xAB);
- emit_operand(src, dst);
-}
-
-
-void Assembler::hlt() {
- EnsureSpace ensure_space(this);
- EMIT(0xF4);
-}
-
-
-void Assembler::int3() {
- EnsureSpace ensure_space(this);
- EMIT(0xCC);
-}
-
-
-void Assembler::nop() {
- EnsureSpace ensure_space(this);
- EMIT(0x90);
-}
-
-
-void Assembler::rdtsc() {
- ASSERT(CpuFeatures::IsEnabled(RDTSC));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x31);
-}
-
-
-void Assembler::ret(int imm16) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint16(imm16));
- if (imm16 == 0) {
- EMIT(0xC3);
- } else {
- EMIT(0xC2);
- EMIT(imm16 & 0xFF);
- EMIT((imm16 >> 8) & 0xFF);
- }
-}
-
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the 32bit
-// Displacement of the last instruction using the label.
-
-
-void Assembler::print(Label* L) {
- if (L->is_unused()) {
- PrintF("unused label\n");
- } else if (L->is_bound()) {
- PrintF("bound label to %d\n", L->pos());
- } else if (L->is_linked()) {
- Label l = *L;
- PrintF("unbound label");
- while (l.is_linked()) {
- Displacement disp = disp_at(&l);
- PrintF("@ %d ", l.pos());
- disp.print();
- PrintF("\n");
- disp.next(&l);
- }
- } else {
- PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
- }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- EnsureSpace ensure_space(this);
- ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
- while (L->is_linked()) {
- Displacement disp = disp_at(L);
- int fixup_pos = L->pos();
- if (disp.type() == Displacement::CODE_RELATIVE) {
- // Relative to Code* heap object pointer.
- long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
- } else {
- if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
- ASSERT(byte_at(fixup_pos - 1) == 0xE9); // jmp expected
- }
- // Relative address, relative to point after address.
- int imm32 = pos - (fixup_pos + sizeof(int32_t));
- long_at_put(fixup_pos, imm32);
- }
- disp.next(L);
- }
- while (L->is_near_linked()) {
- int fixup_pos = L->near_link_pos();
- int offset_to_next =
- static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
- ASSERT(offset_to_next <= 0);
- // Relative address, relative to point after address.
- int disp = pos - fixup_pos - sizeof(int8_t);
- CHECK(0 <= disp && disp <= 127);
- set_byte_at(fixup_pos, disp);
- if (offset_to_next < 0) {
- L->link_to(fixup_pos + offset_to_next, Label::kNear);
- } else {
- L->UnuseNear();
- }
- }
- L->bind_to(pos);
-}
-
-
-void Assembler::bind(Label* L) {
- EnsureSpace ensure_space(this);
- ASSERT(!L->is_bound()); // label can only be bound once
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::call(Label* L) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- if (L->is_bound()) {
- const int long_size = 5;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- // 1110 1000 #32-bit disp.
- EMIT(0xE8);
- emit(offs - long_size);
- } else {
- // 1110 1000 #32-bit disp.
- EMIT(0xE8);
- emit_disp(L, Displacement::OTHER);
- }
-}
-
-
-void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE8);
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
-}
-
-
-int Assembler::CallSize(const Operand& adr) {
- // Call size is 1 (opcode) + adr.len_ (operand).
- return 1 + adr.len_;
-}
-
-
-void Assembler::call(const Operand& adr) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(edx, adr);
-}
-
-
-int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
- return 1 /* EMIT */ + sizeof(uint32_t) /* emit */;
-}
-
-
-void Assembler::call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE8);
- emit(reinterpret_cast<intptr_t>(code.location()), rmode, ast_id);
-}
-
-
-void Assembler::jmp(Label* L, Label::Distance distance) {
- EnsureSpace ensure_space(this);
- if (L->is_bound()) {
- const int short_size = 2;
- const int long_size = 5;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
- // 1110 1011 #8-bit disp.
- EMIT(0xEB);
- EMIT((offs - short_size) & 0xFF);
- } else {
- // 1110 1001 #32-bit disp.
- EMIT(0xE9);
- emit(offs - long_size);
- }
- } else if (distance == Label::kNear) {
- EMIT(0xEB);
- emit_near_disp(L);
- } else {
- // 1110 1001 #32-bit disp.
- EMIT(0xE9);
- emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
- }
-}
-
-
-void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE9);
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
-}
-
-
-void Assembler::jmp(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xFF);
- emit_operand(esp, adr);
-}
-
-
-void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- EMIT(0xE9);
- emit(reinterpret_cast<intptr_t>(code.location()), rmode);
-}
-
-
-void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
- EnsureSpace ensure_space(this);
- ASSERT(0 <= cc && static_cast<int>(cc) < 16);
- if (L->is_bound()) {
- const int short_size = 2;
- const int long_size = 6;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- if (is_int8(offs - short_size)) {
- // 0111 tttn #8-bit disp
- EMIT(0x70 | cc);
- EMIT((offs - short_size) & 0xFF);
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit(offs - long_size);
- }
- } else if (distance == Label::kNear) {
- EMIT(0x70 | cc);
- emit_near_disp(L);
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- // Note: we could eliminate conditional jumps to this jump if the
- // condition is the same; however, that seems a rather unlikely case.
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit_disp(L, Displacement::OTHER);
- }
-}
-
-
-void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- ASSERT((0 <= cc) && (static_cast<int>(cc) < 16));
- // 0000 1111 1000 tttn #32-bit disp.
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit(entry - (pc_ + sizeof(int32_t)), rmode);
-}
-
-
-void Assembler::j(Condition cc, Handle<Code> code) {
- EnsureSpace ensure_space(this);
- // 0000 1111 1000 tttn #32-bit disp
- EMIT(0x0F);
- EMIT(0x80 | cc);
- emit(reinterpret_cast<intptr_t>(code.location()), RelocInfo::CODE_TARGET);
-}
-
-
-// FPU instructions.
-
-void Assembler::fld(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD9, 0xC0, i);
-}
-
-
-void Assembler::fstp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xD8, i);
-}
-
-
-void Assembler::fld1() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE8);
-}
-
-
-void Assembler::fldpi() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xEB);
-}
-
-
-void Assembler::fldz() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xEE);
-}
-
-
-void Assembler::fldln2() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xED);
-}
-
-
-void Assembler::fld_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fld_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fstp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- emit_operand(ebx, adr);
-}
-
-
-void Assembler::fstp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(ebx, adr);
-}
-
-
-void Assembler::fst_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(edx, adr);
-}
-
-
-void Assembler::fild_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- emit_operand(eax, adr);
-}
-
-
-void Assembler::fild_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDF);
- emit_operand(ebp, adr);
-}
-
-
-void Assembler::fistp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- emit_operand(ebx, adr);
-}
-
-
-void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- emit_operand(ecx, adr);
-}
-
-
-void Assembler::fisttp_d(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- EMIT(0xDD);
- emit_operand(ecx, adr);
-}
-
-
-void Assembler::fist_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- emit_operand(edx, adr);
-}
-
-
-void Assembler::fistp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDF);
- emit_operand(edi, adr);
-}
-
-
-void Assembler::fabs() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE1);
-}
-
-
-void Assembler::fchs() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE0);
-}
-
-
-void Assembler::fcos() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFF);
-}
-
-
-void Assembler::fsin() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFE);
-}
-
-
-void Assembler::fptan() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF2);
-}
-
-
-void Assembler::fyl2x() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF1);
-}
-
-
-void Assembler::f2xm1() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF0);
-}
-
-
-void Assembler::fscale() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFD);
-}
-
-
-void Assembler::fninit() {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- EMIT(0xE3);
-}
-
-
-void Assembler::fadd(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xC0, i);
-}
-
-
-void Assembler::fsub(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xE8, i);
-}
-
-
-void Assembler::fisub_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- EMIT(0xDA);
- emit_operand(esp, adr);
-}
-
-
-void Assembler::fmul(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xC8, i);
-}
-
-
-void Assembler::fdiv(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xF8, i);
-}
-
-
-void Assembler::faddp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xC0, i);
-}
-
-
-void Assembler::fsubp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xE8, i);
-}
-
-
-void Assembler::fsubrp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xE0, i);
-}
-
-
-void Assembler::fmulp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xC8, i);
-}
-
-
-void Assembler::fdivp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xF8, i);
-}
-
-
-void Assembler::fprem() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF8);
-}
-
-
-void Assembler::fprem1() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF5);
-}
-
-
-void Assembler::fxch(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD9, 0xC8, i);
-}
-
-
-void Assembler::fincstp() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xF7);
-}
-
-
-void Assembler::ffree(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xC0, i);
-}
-
-
-void Assembler::ftst() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xE4);
-}
-
-
-void Assembler::fucomp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xE8, i);
-}
-
-
-void Assembler::fucompp() {
- EnsureSpace ensure_space(this);
- EMIT(0xDA);
- EMIT(0xE9);
-}
-
-
-void Assembler::fucomi(int i) {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- EMIT(0xE8 + i);
-}
-
-
-void Assembler::fucomip() {
- EnsureSpace ensure_space(this);
- EMIT(0xDF);
- EMIT(0xE9);
-}
-
-
-void Assembler::fcompp() {
- EnsureSpace ensure_space(this);
- EMIT(0xDE);
- EMIT(0xD9);
-}
-
-
-void Assembler::fnstsw_ax() {
- EnsureSpace ensure_space(this);
- EMIT(0xDF);
- EMIT(0xE0);
-}
-
-
-void Assembler::fwait() {
- EnsureSpace ensure_space(this);
- EMIT(0x9B);
-}
-
-
-void Assembler::frndint() {
- EnsureSpace ensure_space(this);
- EMIT(0xD9);
- EMIT(0xFC);
-}
-
-
-void Assembler::fnclex() {
- EnsureSpace ensure_space(this);
- EMIT(0xDB);
- EMIT(0xE2);
-}
-
-
-void Assembler::sahf() {
- EnsureSpace ensure_space(this);
- EMIT(0x9E);
-}
-
-
-void Assembler::setcc(Condition cc, Register reg) {
- ASSERT(reg.is_byte_register());
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x90 | cc);
- EMIT(0xC0 | reg.code());
-}
-
-
-void Assembler::cvttss2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x2C);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cvttsd2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x2C);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2si(Register dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x2D);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x58);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::addsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x58);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x59);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::mulsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x59);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x5E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x57);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::xorps(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x57);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x51);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::andpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x54);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::orpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x56);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x2E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x2E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x3A);
- EMIT(0x0B);
- emit_sse_operand(dst, src);
- // Mask the precision exception.
- EMIT(static_cast<byte>(mode) | 0x8);
-}
-
-void Assembler::movmskpd(Register dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x50);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movmskps(Register dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x50);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x76);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0xC2);
- emit_sse_operand(dst, src);
- EMIT(1); // LT == 1
-}
-
-
-void Assembler::movaps(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x28);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x7F);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x6F);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movdqu(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x7F);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movdqu(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x6F);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x38);
- EMIT(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movntdq(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xE7);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::prefetch(const Operand& src, int level) {
- ASSERT(is_uint2(level));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x18);
- // Emit the hint number in the reg field of the ModR/M byte.
- XMMRegister code = XMMRegister::from_code(level);
- emit_sse_operand(code, src);
-}
-
-
-void Assembler::movdbl(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
-void Assembler::movdbl(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
-void Assembler::movsd(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2); // double
- EMIT(0x0F);
- EMIT(0x11); // store
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2); // double
- EMIT(0x0F);
- EMIT(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x10);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movss(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3); // float
- EMIT(0x0F);
- EMIT(0x11); // store
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movss(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3); // float
- EMIT(0x0F);
- EMIT(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movss(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF3);
- EMIT(0x0F);
- EMIT(0x10);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x6E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movd(const Operand& dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x7E);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(CpuFeatures::IsSupported(SSE4_1));
- ASSERT(is_uint8(imm8));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x3A);
- EMIT(0x17);
- emit_sse_operand(dst, src);
- EMIT(imm8);
-}
-
-
-void Assembler::pand(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xDB);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::pxor(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xEF);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::por(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xEB);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ptest(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x38);
- EMIT(0x17);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::psllq(XMMRegister reg, int8_t shift) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x73);
- emit_sse_operand(esi, reg); // esi == 6
- EMIT(shift);
-}
-
-
-void Assembler::psllq(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xF3);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::psrlq(XMMRegister reg, int8_t shift) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x73);
- emit_sse_operand(edx, reg); // edx == 2
- EMIT(shift);
-}
-
-
-void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0xD3);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x70);
- emit_sse_operand(dst, src);
- EMIT(shuffle);
-}
-
-
-void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x3A);
- EMIT(0x16);
- emit_sse_operand(src, dst);
- EMIT(offset);
-}
-
-
-void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x3A);
- EMIT(0x22);
- emit_sse_operand(dst, src);
- EMIT(offset);
-}
-
-
-void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
- Register ireg = { reg.code() };
- emit_operand(ireg, adr);
-}
-
-
-void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
- EMIT(0xC0 | dst.code() << 3 | src.code());
-}
-
-
-void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
- EMIT(0xC0 | dst.code() << 3 | src.code());
-}
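-
-// NOTE: the two register-register overloads above build a register-direct
-// ModR/M byte: mod = 11b, reg = dst, rm = src. A worked example for
-// movsd(xmm1, xmm2):
-//   0xC0 | (1 << 3) | 2 == 0xCA, so the full encoding is F2 0F 10 CA.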
-
-
-void Assembler::Print() {
- Disassembler::Decode(isolate(), stdout, buffer_, pc_);
-}
-
-
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg, bool force) {
- if (FLAG_code_comments || force) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-void Assembler::GrowBuffer() {
- ASSERT(overflow());
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else {
- desc.buffer_size = 2*buffer_size_;
- }
- // Some internal data structures overflow for very large buffers;
- // that is why kMaximalBufferSize must not be too large.
- if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
- }
-
- // Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
-
- // Clear the buffer in debug mode. Use 'int3' instructions to make
- // sure we run into problems if we ever execute uninitialized code.
-#ifdef DEBUG
- memset(desc.buffer, 0xCC, desc.buffer_size);
-#endif
-
- // Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::RUNTIME_ENTRY) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
- *p -= pc_delta; // relocate entry
- } else if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
- if (*p != 0) { // 0 means uninitialized.
- *p += pc_delta;
- }
- }
- }
-
- ASSERT(!overflow());
-}
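-
-// NOTE: GrowBuffer() grows the buffer to 4KB and doubles it thereafter,
-// then shifts both halves: instructions move by pc_delta, and the
-// relocation info, which grows downward from the end of the buffer,
-// moves by rc_delta. A worked example, assuming a full 4KB buffer at
-// 0x1000 is reallocated to 0x9000:
-//   pc_delta = 0x9000 - 0x1000                       == 0x8000
-//   rc_delta = (0x9000 + 0x2000) - (0x1000 + 0x1000) == 0x9000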
-
-
-void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
- ASSERT(is_uint8(op1) && is_uint8(op2)); // wrong opcode
- ASSERT(is_uint8(imm8));
- ASSERT((op1 & 0x01) == 0); // should be 8bit operation
- EMIT(op1);
- EMIT(op2 | dst.code());
- EMIT(imm8);
-}
-
-
-void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
- ASSERT((0 <= sel) && (sel <= 7));
- Register ireg = { sel };
- if (x.is_int8()) {
- EMIT(0x83); // using a sign-extended 8-bit immediate.
- emit_operand(ireg, dst);
- EMIT(x.x_ & 0xFF);
- } else if (dst.is_reg(eax)) {
- EMIT((sel << 3) | 0x05); // short form if the destination is eax.
- emit(x);
- } else {
- EMIT(0x81); // using a literal 32-bit immediate.
- emit_operand(ireg, dst);
- emit(x);
- }
-}
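-
-// NOTE: emit_arith() picks the shortest of three encodings for the
-// group-1 ALU family; sel is the /n opcode extension (add=/0, or=/1,
-// adc=/2, sbb=/3, and=/4, sub=/5, xor=/6, cmp=/7). For example:
-//   add ecx, 12      => 83 C1 0C           (sign-extended imm8)
-//   add eax, 100000  => 05 A0 86 01 00     (eax short form)
-//   cmp ecx, 100000  => 81 F9 A0 86 01 00  (general imm32 form)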
-
-
-void Assembler::emit_operand(Register reg, const Operand& adr) {
- const unsigned length = adr.len_;
- ASSERT(length > 0);
-
- // Emit updated ModRM byte containing the given register.
- pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
-
- // Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
- pc_ += length;
-
- // Emit relocation information if necessary.
- if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
- pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
- RecordRelocInfo(adr.rmode_);
- pc_ += sizeof(int32_t);
- }
-}
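-
-// NOTE: the Operand has already encoded mod, SIB and displacement into
-// buf_ with a zero reg field, so emit_operand() only has to OR the
-// register (or /n opcode extension) into bits 5..3 of the first byte
-// before copying the bytes into the instruction stream.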
-
-
-void Assembler::emit_farith(int b1, int b2, int i) {
- ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode
- ASSERT(0 <= i && i < 8); // illegal stack offset
- EMIT(b1);
- EMIT(b2 + i);
-}
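-
-// NOTE: emit_farith() encodes the register-stack x87 forms: b1 is the
-// opcode byte and b2 + i selects st(i). For example, fadd(3) emits
-// DC C3 (fadd st(3), st) and fxch(1) emits D9 C9.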
-
-
-void Assembler::db(uint8_t data) {
- EnsureSpace ensure_space(this);
- EMIT(data);
-}
-
-
-void Assembler::dd(uint32_t data) {
- EnsureSpace ensure_space(this);
- emit(data);
-}
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- ASSERT(!RelocInfo::IsNone(rmode));
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
- }
- RelocInfo rinfo(pc_, rmode, data, NULL);
- reloc_info_writer.Write(&rinfo);
-}
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitCoverageLog() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "a+");  // Append; "aw+" is not a standard fopen() mode.
- }
-}
-
-
-void LogGeneratedCodeCoverage(const char* file_line) {
- const char* return_address = (&file_line)[-1];
- char* push_insn = const_cast<char*>(return_address - 12);
- push_insn[0] = 0xeb; // Relative branch insn.
- push_insn[1] = 13; // Skip over coverage insns.
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", file_line);
- fflush(coverage_log);
- }
-}
-
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32.h b/src/3rdparty/v8/src/ia32/assembler-ia32.h
deleted file mode 100644
index 315bc17..0000000
--- a/src/3rdparty/v8/src/ia32/assembler-ia32.h
+++ /dev/null
@@ -1,1281 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license has been
-// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
-
-// A light-weight IA32 Assembler.
-
-#ifndef V8_IA32_ASSEMBLER_IA32_H_
-#define V8_IA32_ASSEMBLER_IA32_H_
-
-#include "isolate.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-struct Register {
- static const int kMaxNumAllocatableRegisters = 6;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
- static const int kNumRegisters = 8;
-
- static inline const char* AllocationIndexToString(int index);
-
- static inline int ToAllocationIndex(Register reg);
-
- static inline Register FromAllocationIndex(int index);
-
- static Register from_code(int code) {
- ASSERT(code >= 0);
- ASSERT(code < kNumRegisters);
- Register r = { code };
- return r;
- }
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- // eax, ebx, ecx and edx are byte registers, the rest are not.
- bool is_byte_register() const { return code_ <= 3; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- ASSERT(is_valid());
- return 1 << code_;
- }
-
- // Unfortunately we can't make this private in a struct.
- int code_;
-};
-
-const int kRegister_eax_Code = 0;
-const int kRegister_ecx_Code = 1;
-const int kRegister_edx_Code = 2;
-const int kRegister_ebx_Code = 3;
-const int kRegister_esp_Code = 4;
-const int kRegister_ebp_Code = 5;
-const int kRegister_esi_Code = 6;
-const int kRegister_edi_Code = 7;
-const int kRegister_no_reg_Code = -1;
-
-const Register eax = { kRegister_eax_Code };
-const Register ecx = { kRegister_ecx_Code };
-const Register edx = { kRegister_edx_Code };
-const Register ebx = { kRegister_ebx_Code };
-const Register esp = { kRegister_esp_Code };
-const Register ebp = { kRegister_ebp_Code };
-const Register esi = { kRegister_esi_Code };
-const Register edi = { kRegister_edi_Code };
-const Register no_reg = { kRegister_no_reg_Code };
-
-
-inline const char* Register::AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- // This is the mapping of allocation indices to registers.
- const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
- return kNames[index];
-}
-
-
-inline int Register::ToAllocationIndex(Register reg) {
- ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
- return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
-}
-
-
-inline Register Register::FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- return (index >= 4) ? from_code(index + 2) : from_code(index);
-}
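-
-// NOTE: the allocation index simply skips esp (code 4) and ebp (code 5),
-// which are never allocatable:
-//   code:  eax=0 ecx=1 edx=2 ebx=3 esi=6 edi=7
-//   index: 0     1     2     3     4     5
-// so ToAllocationIndex and FromAllocationIndex are exact inverses.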
-
-
-struct IntelDoubleRegister {
- static const int kMaxNumRegisters = 8;
- static const int kMaxNumAllocatableRegisters = 7;
- static int NumAllocatableRegisters();
- static int NumRegisters();
- static const char* AllocationIndexToString(int index);
-
- static int ToAllocationIndex(IntelDoubleRegister reg) {
- ASSERT(reg.code() != 0);
- return reg.code() - 1;
- }
-
- static IntelDoubleRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- return from_code(index + 1);
- }
-
- static IntelDoubleRegister from_code(int code) {
- IntelDoubleRegister result = { code };
- return result;
- }
-
- bool is_valid() const {
- return 0 <= code_ && code_ < NumRegisters();
- }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
-
- int code_;
-};
-
-
-const IntelDoubleRegister double_register_0 = { 0 };
-const IntelDoubleRegister double_register_1 = { 1 };
-const IntelDoubleRegister double_register_2 = { 2 };
-const IntelDoubleRegister double_register_3 = { 3 };
-const IntelDoubleRegister double_register_4 = { 4 };
-const IntelDoubleRegister double_register_5 = { 5 };
-const IntelDoubleRegister double_register_6 = { 6 };
-const IntelDoubleRegister double_register_7 = { 7 };
-
-
-struct XMMRegister : IntelDoubleRegister {
- static const int kNumAllocatableRegisters = 7;
- static const int kNumRegisters = 8;
-
- static XMMRegister from_code(int code) {
- STATIC_ASSERT(sizeof(XMMRegister) == sizeof(IntelDoubleRegister));
- XMMRegister result;
- result.code_ = code;
- return result;
- }
-
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
-
- static XMMRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- return from_code(index + 1);
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "xmm1",
- "xmm2",
- "xmm3",
- "xmm4",
- "xmm5",
- "xmm6",
- "xmm7"
- };
- return names[index];
- }
-};
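-
-// NOTE: allocation indices for double registers start at xmm1
-// (FromAllocationIndex returns from_code(index + 1)), which keeps xmm0
-// out of the allocatable set; it is presumably reserved as a scratch
-// register by the rest of the port.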
-
-
-#define xmm0 (static_cast<const XMMRegister&>(double_register_0))
-#define xmm1 (static_cast<const XMMRegister&>(double_register_1))
-#define xmm2 (static_cast<const XMMRegister&>(double_register_2))
-#define xmm3 (static_cast<const XMMRegister&>(double_register_3))
-#define xmm4 (static_cast<const XMMRegister&>(double_register_4))
-#define xmm5 (static_cast<const XMMRegister&>(double_register_5))
-#define xmm6 (static_cast<const XMMRegister&>(double_register_6))
-#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
-
-
-struct X87TopOfStackRegister : IntelDoubleRegister {
- static const int kNumAllocatableRegisters = 1;
- static const int kNumRegisters = 1;
-
- bool is(X87TopOfStackRegister reg) const {
- return code_ == reg.code_;
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "st0",
- };
- return names[index];
- }
-
- static int ToAllocationIndex(X87TopOfStackRegister reg) {
- ASSERT(reg.code() == 0);
- return 0;
- }
-};
-
-#define x87tos \
- static_cast<const X87TopOfStackRegister&>(double_register_0)
-
-
-typedef IntelDoubleRegister DoubleRegister;
-
-
-enum Condition {
- // any value < 0 is considered no_condition
- no_condition = -1,
-
- overflow = 0,
- no_overflow = 1,
- below = 2,
- above_equal = 3,
- equal = 4,
- not_equal = 5,
- below_equal = 6,
- above = 7,
- negative = 8,
- positive = 9,
- parity_even = 10,
- parity_odd = 11,
- less = 12,
- greater_equal = 13,
- less_equal = 14,
- greater = 15,
-
- // aliases
- carry = below,
- not_carry = above_equal,
- zero = equal,
- not_zero = not_equal,
- sign = negative,
- not_sign = positive
-};
-
-
-// Returns the equivalent of !cc.
-// Negation of the default no_condition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc) {
- return static_cast<Condition>(cc ^ 1);
-}
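-
-// NOTE: this works because ia32 condition codes come in complementary
-// even/odd pairs, so flipping bit 0 negates the test, e.g.
-// NegateCondition(equal /* 4 */) == not_equal /* 5 */ and
-// NegateCondition(below /* 2 */) == above_equal /* 3 */.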
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case below:
- return above;
- case above:
- return below;
- case above_equal:
- return below_equal;
- case below_equal:
- return above_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- };
-}
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Immediates
-
-class Immediate BASE_EMBEDDED {
- public:
- inline explicit Immediate(int x);
- inline explicit Immediate(const ExternalReference& ext);
- inline explicit Immediate(Handle<Object> handle);
- inline explicit Immediate(Smi* value);
- inline explicit Immediate(Address addr);
-
- static Immediate CodeRelativeOffset(Label* label) {
- return Immediate(label);
- }
-
- bool is_zero() const { return x_ == 0 && RelocInfo::IsNone(rmode_); }
- bool is_int8() const {
- return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
- }
- bool is_int16() const {
- return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
- }
-
- private:
- inline explicit Immediate(Label* value);
-
- int x_;
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
- friend class MacroAssembler;
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-enum ScaleFactor {
- times_1 = 0,
- times_2 = 1,
- times_4 = 2,
- times_8 = 3,
- times_int_size = times_4,
- times_half_pointer_size = times_2,
- times_pointer_size = times_4,
- times_twice_pointer_size = times_8
-};
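-
-// NOTE: ScaleFactor is the log2 of the scale stored in the SIB byte, so
-// Operand(ebx, ecx, times_4, 12) describes the address [ebx + ecx*4 + 12]
-// and encodes as ModR/M 44, SIB 8B (scale=10b, index=ecx, base=ebx),
-// disp8 0C.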
-
-
-class Operand BASE_EMBEDDED {
- public:
- // XMM reg
- INLINE(explicit Operand(XMMRegister xmm_reg));
-
- // [disp/r]
- INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
- // An Operand consisting only of a displacement must always be relocated.
-
- // [base + disp/r]
- explicit Operand(Register base, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
-
- // [base + index*scale + disp/r]
- explicit Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
-
- // [index*scale + disp/r]
- explicit Operand(Register index,
- ScaleFactor scale,
- int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE32);
-
- static Operand StaticVariable(const ExternalReference& ext) {
- return Operand(reinterpret_cast<int32_t>(ext.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static Operand StaticArray(Register index,
- ScaleFactor scale,
- const ExternalReference& arr) {
- return Operand(index, scale, reinterpret_cast<int32_t>(arr.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
- return Operand(reinterpret_cast<int32_t>(cell.location()),
- RelocInfo::GLOBAL_PROPERTY_CELL);
- }
-
- // Returns true if this Operand is a wrapper for the specified register.
- bool is_reg(Register reg) const;
-
- // Returns true if this Operand is a wrapper for one register.
- bool is_reg_only() const;
-
- // Asserts that this Operand is a wrapper for one register and returns the
- // register.
- Register reg() const;
-
- private:
- // reg
- INLINE(explicit Operand(Register reg));
-
- // Set the ModRM byte without an encoded 'reg' register. The
- // register is encoded later as part of the emit_operand operation.
- inline void set_modrm(int mod, Register rm);
-
- inline void set_sib(ScaleFactor scale, Register index, Register base);
- inline void set_disp8(int8_t disp);
- inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
-
- byte buf_[6];
- // The number of bytes in buf_.
- unsigned int len_;
- // Only valid if len_ > 4.
- RelocInfo::Mode rmode_;
-
- friend class Assembler;
- friend class MacroAssembler;
- friend class LCodeGen;
-};
-
-
-// -----------------------------------------------------------------------------
-// A Displacement describes the 32bit immediate field of an instruction which
-// may be used together with a Label in order to refer to a yet unknown code
-// position. Displacements stored in the instruction stream are used to describe
-// the instruction and to chain a list of instructions using the same Label.
-// A Displacement contains 2 different fields:
-//
-// next field: position of next displacement in the chain (0 = end of list)
-// type field: instruction type
-//
-// A next value of null (0) indicates the end of a chain (note that there can
-// be no displacement at position zero, because there is always at least one
-// instruction byte before the displacement).
-//
-// Displacement _data field layout
-//
-// |31.....2|1......0|
-// |  next  |  type  |
-
-class Displacement BASE_EMBEDDED {
- public:
- enum Type {
- UNCONDITIONAL_JUMP,
- CODE_RELATIVE,
- OTHER
- };
-
- int data() const { return data_; }
- Type type() const { return TypeField::decode(data_); }
- void next(Label* L) const {
- int n = NextField::decode(data_);
- n > 0 ? L->link_to(n) : L->Unuse();
- }
- void link_to(Label* L) { init(L, type()); }
-
- explicit Displacement(int data) { data_ = data; }
-
- Displacement(Label* L, Type type) { init(L, type); }
-
- void print() {
- PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
- NextField::decode(data_));
- }
-
- private:
- int data_;
-
- class TypeField: public BitField<Type, 0, 2> {};
- class NextField: public BitField<int, 2, 32-2> {};
-
- void init(Label* L, Type type);
-};
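-
-// NOTE: a Displacement packs the label-chain link into the 32-bit
-// displacement field itself: data_ == (next_position << 2) | type, so an
-// UNCONDITIONAL_JUMP (type 0) chained to position 0x40 stores 0x100.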
-
-
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-// Example:
-// if (CpuFeatures::IsSupported(SSE2)) {
-// CpuFeatures::Scope fscope(SSE2);
-// // Generate SSE2 floating point code.
-// } else {
-// // Generate standard x87 floating point code.
-// }
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == SSE2 && !FLAG_enable_sse2) return false;
- if (f == SSE3 && !FLAG_enable_sse3) return false;
- if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
- if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
- }
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- uint64_t enabled = isolate->enabled_cpu_features();
- return (enabled & (static_cast<uint64_t>(1) << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- uint64_t mask = static_cast<uint64_t>(1) << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = isolate_->enabled_cpu_features();
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- uint64_t old_enabled_;
-#else
-
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (static_cast<uint64_t>(1) << f);
- }
- }
-
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
-
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
-
- const uint64_t old_supported_;
- };
-
- private:
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static uint64_t supported_;
- static uint64_t found_by_runtime_probing_;
-
- friend class ExternalReference;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
-class Assembler : public AssemblerBase {
- private:
- // We check before assembling an instruction that there is sufficient
- // space to write an instruction and its relocation information.
- // The relocation writer's position must be kGap bytes above the end of
- // the generated instructions. This leaves enough space for the
- // longest possible ia32 instruction, 15 bytes, and the longest possible
- // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
- // (There is a 15 byte limit on ia32 instruction length that rules out some
- // otherwise valid instructions.)
- // This allows for a single, fast space check per instruction.
- static const int kGap = 32;
-
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- // TODO(vitalyr): the assembler does not need an isolate.
- Assembler(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~Assembler() { }
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Read/Modify the code target in the branch/call instruction at pc.
- inline static Address target_address_at(Address pc);
- inline static void set_target_address_at(Address pc, Address target);
-
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- inline static Address target_address_from_return_address(Address pc);
-
- // This sets the branch destination (which is in the instruction on x86).
- // This is for calls and branches within generated code.
- inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
- // This sets the branch destination (which is in the instruction on x86).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
- static const int kSpecialTargetSize = kPointerSize;
-
- // Distance between the address of the code target in the call instruction
- // and the return address
- static const int kCallTargetAddressOffset = kPointerSize;
- // Distance between start of patched return sequence and the emitted address
- // to jump to.
- static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
-
- // Distance between start of patched debug break slot and the emitted address
- // to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
-
- static const int kCallInstructionLength = 5;
- static const int kPatchDebugBreakSlotReturnOffset = kPointerSize;
- static const int kJSReturnSequenceLength = 6;
-
- // The debug break slot must be able to contain a call instruction.
- static const int kDebugBreakSlotLength = kCallInstructionLength;
-
- // One byte opcode for test al, 0xXX.
- static const byte kTestAlByte = 0xA8;
- // One byte opcode for nop.
- static const byte kNopByte = 0x90;
-
- // One byte opcode for a short unconditional jump.
- static const byte kJmpShortOpcode = 0xEB;
- // One byte prefix for a short conditional jump.
- static const byte kJccShortPrefix = 0x70;
- static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
- static const byte kJcShortOpcode = kJccShortPrefix | carry;
- static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
- static const byte kJzShortOpcode = kJccShortPrefix | zero;
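-
- // NOTE: a short conditional jump is simply the 0x70 prefix ORed with
- // the Condition value, e.g. kJzShortOpcode == 0x70 | 4 == 0x74 and
- // kJncShortOpcode == 0x70 | 3 == 0x73.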
-
-
- // ---------------------------------------------------------------------------
- // Code generation
- //
- // - function names correspond one-to-one to ia32 instruction mnemonics
- // - unless specified otherwise, instructions operate on 32bit operands
- // - instructions on 8bit (byte) operands/registers have a trailing '_b'
- // - instructions on 16bit (word) operands/registers have a trailing '_w'
- // - naming conflicts with C++ keywords are resolved via a trailing '_'
-
- // NOTE ON INTERFACE: Currently, the interface is not very consistent
- // in the sense that some operations (e.g. mov()) can be called in more
- // than one way to generate the same instruction: The Register argument
- // can in some cases be replaced with an Operand(Register) argument.
- // This should be cleaned up and made more orthogonal. The question
- // is: should we always use Operands instead of Registers where an
- // Operand is possible, or should we have a Register (overloaded) form
- // instead? We must be careful to make sure that the selected instruction
- // is obvious from the parameters to avoid hard-to-find code generation
- // bugs.
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m. m must be a power of 2.
- void Align(int m);
- void Nop(int bytes = 1);
- // Aligns code to a position that is optimal for a jump target on this platform.
- void CodeTargetAlign();
-
- // Stack
- void pushad();
- void popad();
-
- void pushfd();
- void popfd();
-
- void push(const Immediate& x);
- void push_imm32(int32_t imm32);
- void push(Register src);
- void push(const Operand& src);
-
- void pop(Register dst);
- void pop(const Operand& dst);
-
- void enter(const Immediate& size);
- void leave();
-
- // Moves
- void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
- void mov_b(Register dst, const Operand& src);
- void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
- void mov_b(const Operand& dst, int8_t imm8);
- void mov_b(const Operand& dst, Register src);
-
- void mov_w(Register dst, const Operand& src);
- void mov_w(const Operand& dst, Register src);
-
- void mov(Register dst, int32_t imm32);
- void mov(Register dst, const Immediate& x);
- void mov(Register dst, Handle<Object> handle);
- void mov(Register dst, const Operand& src);
- void mov(Register dst, Register src);
- void mov(const Operand& dst, const Immediate& x);
- void mov(const Operand& dst, Handle<Object> handle);
- void mov(const Operand& dst, Register src);
-
- void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
- void movsx_b(Register dst, const Operand& src);
-
- void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
- void movsx_w(Register dst, const Operand& src);
-
- void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
- void movzx_b(Register dst, const Operand& src);
-
- void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
- void movzx_w(Register dst, const Operand& src);
-
- // Conditional moves
- void cmov(Condition cc, Register dst, Register src) {
- cmov(cc, dst, Operand(src));
- }
- void cmov(Condition cc, Register dst, const Operand& src);
-
- // Flag management.
- void cld();
-
- // Repetitive string instructions.
- void rep_movs();
- void rep_stos();
- void stos();
-
- // Exchange two registers
- void xchg(Register dst, Register src);
-
- // Arithmetics
- void adc(Register dst, int32_t imm32);
- void adc(Register dst, const Operand& src);
-
- void add(Register dst, Register src) { add(dst, Operand(src)); }
- void add(Register dst, const Operand& src);
- void add(const Operand& dst, Register src);
- void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
- void add(const Operand& dst, const Immediate& x);
-
- void and_(Register dst, int32_t imm32);
- void and_(Register dst, const Immediate& x);
- void and_(Register dst, Register src) { and_(dst, Operand(src)); }
- void and_(Register dst, const Operand& src);
- void and_(const Operand& dst, Register src);
- void and_(const Operand& dst, const Immediate& x);
-
- void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
- void cmpb(const Operand& op, int8_t imm8);
- void cmpb(Register reg, const Operand& op);
- void cmpb(const Operand& op, Register reg);
- void cmpb_al(const Operand& op);
- void cmpw_ax(const Operand& op);
- void cmpw(const Operand& op, Immediate imm16);
- void cmp(Register reg, int32_t imm32);
- void cmp(Register reg, Handle<Object> handle);
- void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
- void cmp(Register reg, const Operand& op);
- void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
- void cmp(const Operand& op, const Immediate& imm);
- void cmp(const Operand& op, Handle<Object> handle);
-
- void dec_b(Register dst);
- void dec_b(const Operand& dst);
-
- void dec(Register dst);
- void dec(const Operand& dst);
-
- void cdq();
-
- void idiv(Register src);
-
- // Signed multiply instructions.
- void imul(Register src); // edx:eax = eax * src.
- void imul(Register dst, Register src) { imul(dst, Operand(src)); }
- void imul(Register dst, const Operand& src); // dst = dst * src.
- void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
-
- void inc(Register dst);
- void inc(const Operand& dst);
-
- void lea(Register dst, const Operand& src);
-
- // Unsigned multiply instruction.
- void mul(Register src); // edx:eax = eax * reg.
-
- void neg(Register dst);
-
- void not_(Register dst);
-
- void or_(Register dst, int32_t imm32);
- void or_(Register dst, Register src) { or_(dst, Operand(src)); }
- void or_(Register dst, const Operand& src);
- void or_(const Operand& dst, Register src);
- void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
- void or_(const Operand& dst, const Immediate& x);
-
- void rcl(Register dst, uint8_t imm8);
- void rcr(Register dst, uint8_t imm8);
- void ror(Register dst, uint8_t imm8);
- void ror_cl(Register dst);
-
- void sar(Register dst, uint8_t imm8);
- void sar_cl(Register dst);
-
- void sbb(Register dst, const Operand& src);
-
- void shld(Register dst, Register src) { shld(dst, Operand(src)); }
- void shld(Register dst, const Operand& src);
-
- void shl(Register dst, uint8_t imm8);
- void shl_cl(Register dst);
-
- void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
- void shrd(Register dst, const Operand& src);
-
- void shr(Register dst, uint8_t imm8);
- void shr_cl(Register dst);
-
- void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
- void sub(const Operand& dst, const Immediate& x);
- void sub(Register dst, Register src) { sub(dst, Operand(src)); }
- void sub(Register dst, const Operand& src);
- void sub(const Operand& dst, Register src);
-
- void test(Register reg, const Immediate& imm);
- void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
- void test(Register reg, const Operand& op);
- void test_b(Register reg, const Operand& op);
- void test(const Operand& op, const Immediate& imm);
- void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
- void test_b(const Operand& op, uint8_t imm8);
-
- void xor_(Register dst, int32_t imm32);
- void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
- void xor_(Register dst, const Operand& src);
- void xor_(const Operand& dst, Register src);
- void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
- void xor_(const Operand& dst, const Immediate& x);
-
- // Bit operations.
- void bt(const Operand& dst, Register src);
- void bts(Register dst, Register src) { bts(Operand(dst), src); }
- void bts(const Operand& dst, Register src);
-
- // Miscellaneous
- void hlt();
- void int3();
- void nop();
- void rdtsc();
- void ret(int imm16);
-
- // Label operations & relative jumps (PPUM Appendix D)
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void bind(Label* L); // binds an unbound label L to the current code position
-
- // Calls
- void call(Label* L);
- void call(byte* entry, RelocInfo::Mode rmode);
- int CallSize(const Operand& adr);
- void call(Register reg) { call(Operand(reg)); }
- void call(const Operand& adr);
- int CallSize(Handle<Code> code, RelocInfo::Mode mode);
- void call(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId id = TypeFeedbackId::None());
-
- // Jumps
- // unconditional jump to L
- void jmp(Label* L, Label::Distance distance = Label::kFar);
- void jmp(byte* entry, RelocInfo::Mode rmode);
- void jmp(Register reg) { jmp(Operand(reg)); }
- void jmp(const Operand& adr);
- void jmp(Handle<Code> code, RelocInfo::Mode rmode);
-
- // Conditional jumps
- void j(Condition cc,
- Label* L,
- Label::Distance distance = Label::kFar);
- void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
- void j(Condition cc, Handle<Code> code);
-
- // Floating-point operations
- void fld(int i);
- void fstp(int i);
-
- void fld1();
- void fldz();
- void fldpi();
- void fldln2();
-
- void fld_s(const Operand& adr);
- void fld_d(const Operand& adr);
-
- void fstp_s(const Operand& adr);
- void fstp_d(const Operand& adr);
- void fst_d(const Operand& adr);
-
- void fild_s(const Operand& adr);
- void fild_d(const Operand& adr);
-
- void fist_s(const Operand& adr);
-
- void fistp_s(const Operand& adr);
- void fistp_d(const Operand& adr);
-
- // The fisttp instructions require SSE3.
- void fisttp_s(const Operand& adr);
- void fisttp_d(const Operand& adr);
-
- void fabs();
- void fchs();
- void fcos();
- void fsin();
- void fptan();
- void fyl2x();
- void f2xm1();
- void fscale();
- void fninit();
-
- void fadd(int i);
- void fsub(int i);
- void fmul(int i);
- void fdiv(int i);
-
- void fisub_s(const Operand& adr);
-
- void faddp(int i = 1);
- void fsubp(int i = 1);
- void fsubrp(int i = 1);
- void fmulp(int i = 1);
- void fdivp(int i = 1);
- void fprem();
- void fprem1();
-
- void fxch(int i = 1);
- void fincstp();
- void ffree(int i = 0);
-
- void ftst();
- void fucomp(int i);
- void fucompp();
- void fucomi(int i);
- void fucomip();
- void fcompp();
- void fnstsw_ax();
- void fwait();
- void fnclex();
-
- void frndint();
-
- void sahf();
- void setcc(Condition cc, Register reg);
-
- void cpuid();
-
- // SSE2 instructions
- void cvttss2si(Register dst, const Operand& src);
- void cvttsd2si(Register dst, const Operand& src);
- void cvtsd2si(Register dst, XMMRegister src);
-
- void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
- void cvtsi2sd(XMMRegister dst, const Operand& src);
- void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtsd2ss(XMMRegister dst, XMMRegister src);
-
- void addsd(XMMRegister dst, XMMRegister src);
- void addsd(XMMRegister dst, const Operand& src);
- void subsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, const Operand& src);
- void divsd(XMMRegister dst, XMMRegister src);
- void xorpd(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
- void sqrtsd(XMMRegister dst, XMMRegister src);
-
- void andpd(XMMRegister dst, XMMRegister src);
- void orpd(XMMRegister dst, XMMRegister src);
-
- void ucomisd(XMMRegister dst, XMMRegister src);
- void ucomisd(XMMRegister dst, const Operand& src);
-
- enum RoundingMode {
- kRoundToNearest = 0x0,
- kRoundDown = 0x1,
- kRoundUp = 0x2,
- kRoundToZero = 0x3
- };
-
- void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
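-
- // NOTE: the roundsd immediate holds the rounding mode in bits 1..0 and
- // sets bit 3 to mask the precision exception, so
- // roundsd(xmm0, xmm1, kRoundDown) emits 66 0F 3A 0B C1 09.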
-
- void movmskpd(Register dst, XMMRegister src);
- void movmskps(Register dst, XMMRegister src);
-
- void cmpltsd(XMMRegister dst, XMMRegister src);
- void pcmpeqd(XMMRegister dst, XMMRegister src);
-
- void movaps(XMMRegister dst, XMMRegister src);
-
- void movdqa(XMMRegister dst, const Operand& src);
- void movdqa(const Operand& dst, XMMRegister src);
- void movdqu(XMMRegister dst, const Operand& src);
- void movdqu(const Operand& dst, XMMRegister src);
-
- // Use either movsd or movlpd.
- void movdbl(XMMRegister dst, const Operand& src);
- void movdbl(const Operand& dst, XMMRegister src);
-
- void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
- void movd(XMMRegister dst, const Operand& src);
- void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
- void movd(const Operand& dst, XMMRegister src);
- void movsd(XMMRegister dst, XMMRegister src);
-
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
- void movss(XMMRegister dst, XMMRegister src);
- void extractps(Register dst, XMMRegister src, byte imm8);
-
- void pand(XMMRegister dst, XMMRegister src);
- void pxor(XMMRegister dst, XMMRegister src);
- void por(XMMRegister dst, XMMRegister src);
- void ptest(XMMRegister dst, XMMRegister src);
-
- void psllq(XMMRegister reg, int8_t shift);
- void psllq(XMMRegister dst, XMMRegister src);
- void psrlq(XMMRegister reg, int8_t shift);
- void psrlq(XMMRegister dst, XMMRegister src);
- void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
- void pextrd(Register dst, XMMRegister src, int8_t offset) {
- pextrd(Operand(dst), src, offset);
- }
- void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
- void pinsrd(XMMRegister dst, Register src, int8_t offset) {
- pinsrd(dst, Operand(src), offset);
- }
- void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
-
- // Parallel XMM operations.
- void movntdqa(XMMRegister dst, const Operand& src);
- void movntdq(const Operand& dst, XMMRegister src);
- // Prefetch src position into cache level.
- // Level 1, 2 or 3 specifies the CPU cache level. Level 0 specifies a
- // non-temporal prefetch (prefetchnta).
- void prefetch(const Operand& src, int level);
- // TODO(lrn): Need SFENCE for movnt?
-
- // Debugging
- void Print();
-
- // Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* label) {
- return pc_offset() - label->pos();
- }
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot();
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable, or provide "force = true" flag to always
- // write a comment.
- void RecordComment(const char* msg, bool force = false);
-
- // Writes a single byte or 32-bit word of data in the code stream. Used for
- // inline tables, e.g., jump-tables.
- void db(uint8_t data);
- void dd(uint32_t data);
-
- // Check if there is less than kGap bytes available in the buffer.
- // If this is the case, we need to grow the buffer before emitting
- // an instruction or relocation information.
- inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
-
- // Get the number of bytes available in the buffer.
- inline int available_space() const { return reloc_info_writer.pos() - pc_; }
-
- static bool IsNop(Address addr);
-
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
-
- int relocation_writer_size() {
- return (buffer_ + buffer_size_) - reloc_info_writer.pos();
- }
-
- // Avoid overflows for displacements etc.
- static const int kMaximalBufferSize = 512*MB;
-
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
-
- protected:
- void movsd(XMMRegister dst, const Operand& src);
- void movsd(const Operand& dst, XMMRegister src);
-
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
- void emit_sse_operand(XMMRegister dst, XMMRegister src);
- void emit_sse_operand(Register dst, XMMRegister src);
-
- byte* addr_at(int pos) { return buffer_ + pos; }
-
-
- private:
- uint32_t long_at(int pos) {
- return *reinterpret_cast<uint32_t*>(addr_at(pos));
- }
- void long_at_put(int pos, uint32_t x) {
- *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
- }
-
- // code emission
- void GrowBuffer();
- inline void emit(uint32_t x);
- inline void emit(Handle<Object> handle);
- inline void emit(uint32_t x,
- RelocInfo::Mode rmode,
- TypeFeedbackId id = TypeFeedbackId::None());
- inline void emit(const Immediate& x);
- inline void emit_w(const Immediate& x);
-
- // Emit the code-object-relative offset of the label's position
- inline void emit_code_relative_offset(Label* label);
-
- // instruction generation
- void emit_arith_b(int op1, int op2, Register dst, int imm8);
-
- // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
- // with a given destination expression and an immediate operand. It attempts
- // to use the shortest encoding possible.
- // sel specifies the /n in the modrm byte (see the Intel PRM).
- void emit_arith(int sel, Operand dst, const Immediate& x);
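- // For example, an immediate that fits in a sign-extended byte can be
- // emitted with opcode 0x83 and a one-byte immediate instead of opcode
- // 0x81 and a four-byte immediate; this is the shortest encoding
- // mentioned above.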
-
- void emit_operand(Register reg, const Operand& adr);
-
- void emit_farith(int b1, int b2, int i);
-
- // labels
- void print(Label* L);
- void bind_to(Label* L, int pos);
-
- // displacements
- inline Displacement disp_at(Label* L);
- inline void disp_at_put(Label* L, Displacement disp);
- inline void emit_disp(Label* L, Displacement::Type type);
- inline void emit_near_disp(Label* L);
-
- // record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- friend class CodePatcher;
- friend class EnsureSpace;
-
- // code generation
- RelocInfoWriter reloc_info_writer;
-
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
-};
-
-
-// Helper class that ensures that there is enough space for generating
-// instructions and relocation information. The constructor makes
-// sure that there is enough space and (in debug mode) the destructor
-// checks that we did not generate too much.
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->overflow()) assembler_->GrowBuffer();
-#ifdef DEBUG
- space_before_ = assembler_->available_space();
-#endif
- }
-
-#ifdef DEBUG
- ~EnsureSpace() {
- int bytes_generated = space_before_ - assembler_->available_space();
- ASSERT(bytes_generated < assembler_->kGap);
- }
-#endif
-
- private:
- Assembler* assembler_;
-#ifdef DEBUG
- int space_before_;
-#endif
-};
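-// Each emitter in the Assembler is expected to start with
-//   EnsureSpace ensure_space(this);
-// so that the buffer is grown, if necessary, before any bytes are
-// written.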
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_ASSEMBLER_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/builtins-ia32.cc b/src/3rdparty/v8/src/ia32/builtins-ia32.cc
deleted file mode 100644
index e3b2b7b..0000000
--- a/src/3rdparty/v8/src/ia32/builtins-ia32.cc
+++ /dev/null
@@ -1,1869 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments excluding receiver
- // -- edi : called function (only guaranteed when
- // extra_args requires it)
- // -- esi : context
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -- ...
- // -- esp[4 * argc] : first argument (argc == eax)
- // -- esp[4 * (argc + 1)] : receiver
- // -----------------------------------
-
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- Register scratch = ebx;
- __ pop(scratch); // Save return address.
- __ push(edi);
- __ push(scratch); // Restore return address.
- } else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- }
-
- // JumpToExternalReference expects eax to contain the number of arguments
- // including the receiver and the extra arguments.
- __ add(eax, Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
-
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
-}
-
-
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
-
- // Tear down internal frame.
- }
-
- GenerateTailCallToSharedCode(masm);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
- // ----------- S t a t e -------------
- // -- eax: number of arguments
- // -- edi: constructor function
- // -----------------------------------
-
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
-
- // Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
-
- // Store a smi-tagged arguments count on the stack.
- __ SmiTag(eax);
- __ push(eax);
-
- // Push the function to invoke on the stack.
- __ push(edi);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(not_equal, &rt_call);
-#endif
-
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // A smi check will catch both a NULL and a Smi.
- __ JumpIfSmi(eax, &rt_call);
- // edi: constructor
- // eax: initial map (if proven valid below)
- __ CmpObjectType(eax, MAP_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc), in which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // edi: constructor
- // eax: initial map
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ dec_b(FieldOperand(ecx,
- SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
-
- __ push(eax);
- __ push(edi);
-
- __ push(edi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(edi);
- __ pop(eax);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // edi: constructor
- // eax: initial map
- __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ shl(edi, kPointerSizeLog2);
- __ AllocateInNewSpace(
- edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ mov(Operand(ebx, JSObject::kMapOffset), eax);
- Factory* factory = masm->isolate()->factory();
- __ mov(ecx, factory->empty_fixed_array());
- __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
- __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
- // Set extra fields in the newly allocated object.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
- __ mov(edx, factory->undefined_value());
- if (count_constructions) {
- __ movzx_b(esi,
- FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
- __ lea(esi,
- Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
- // esi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(esi, edi);
- __ Assert(less_equal,
- "Unexpected number of pre-allocated property fields.");
- }
- __ InitializeFieldsWithFiller(ecx, esi, edx);
- __ mov(edx, factory->one_pointer_filler_map());
- }
- __ InitializeFieldsWithFiller(ecx, edi, edx);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ or_(ebx, Immediate(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- // Calculate the total number of properties described by the map.
- __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ movzx_b(ecx,
- FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
- __ add(edx, ecx);
- // Calculate unused properties past the end of the in-object properties.
- __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
- __ sub(edx, ecx);
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // ebx: JSObject
- // edi: start of next object (will be start of FixedArray)
- // edx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- edx,
- REGISTER_VALUE_IS_INT32,
- edi,
- ecx,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // ebx: JSObject
- // edi: FixedArray
- // edx: number of elements
- // ecx: start of next object
- __ mov(eax, factory->fixed_array_map());
- __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
- __ SmiTag(edx);
- __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
-
- // Initialize the fields to undefined.
- // ebx: JSObject
- // edi: FixedArray
- // ecx: start of next object
- { Label loop, entry;
- __ mov(edx, factory->undefined_value());
- __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(Operand(eax, 0), edx);
- __ add(eax, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmp(eax, ecx);
- __ j(below, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // ebx: JSObject
- // edi: FixedArray
- __ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag
- __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
-
-
- // Continue with JSObject being successfully allocated
- // ebx: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated object's unused properties.
- // ebx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(ebx);
- }
-
- // Allocate the new receiver object using the runtime call.
- __ bind(&rt_call);
- // Must restore edi (constructor) before calling runtime.
- __ mov(edi, Operand(esp, 0));
- // edi: function (constructor)
- __ push(edi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ mov(ebx, eax); // store result in ebx
-
- // New object allocated.
- // ebx: newly allocated object
- __ bind(&allocated);
- // Retrieve the function from the stack.
- __ pop(edi);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ mov(eax, Operand(esp, 0));
- __ SmiUntag(eax);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(ebx);
- __ push(ebx);
-
- // Set up pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, eax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Call the function.
- if (is_api_function) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
-
- // Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &exit);
-
- // Symbols are "objects".
- __ CmpInstanceType(ecx, SYMBOL_TYPE);
- __ j(equal, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0));
-
- // Restore the arguments count and leave the construct frame.
- __ bind(&exit);
- __ mov(ebx, Operand(esp, kPointerSize)); // Get arguments count.
-
- // Leave construct frame.
- }
-
- // Remove caller arguments from the stack and return.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
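- // ebx holds the smi-tagged argument count, i.e. argc << kSmiTagSize with
- // kSmiTagSize == 1, so scaling it with times_2 yields argc * 4 bytes
- // (argc * kPointerSize); the extra 1 * kPointerSize drops the receiver.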
- __ push(ecx);
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
- __ ret(0);
-}
-
-
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
-}
-
-
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- // Clear the context before we push it when entering the internal frame.
- __ Set(esi, Immediate(0));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load the previous frame pointer (ebx) to access C arguments
- __ mov(ebx, Operand(ebp, 0));
-
- // Get the function from the frame and setup the context.
- __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
- __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
-
- // Push the function and the receiver onto the stack.
- __ push(ecx);
- __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
-
- // Load the number of arguments and setup pointer to the arguments.
- __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
- __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
-
- // Copy arguments to the stack in a loop.
- Label loop, entry;
- __ Set(ecx, Immediate(0));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
- __ push(Operand(edx, 0)); // dereference handle
- __ inc(ecx);
- __ bind(&entry);
- __ cmp(ecx, eax);
- __ j(not_equal, &loop);
-
- // Get the function from the stack and call it.
- // kPointerSize for the receiver.
- __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
-
- // Invoke the code.
- if (is_construct) {
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
-
- // Exit the internal frame. Notice that this also removes the empty
- // context and the function left on the stack by the code
- // invocation.
- }
- __ ret(kPointerSize); // Remove receiver.
-}
-
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
-}
-
-
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(edi);
- // Push call kind information.
- __ push(ecx);
-
- __ push(edi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
-
- // Restore call kind information.
- __ pop(ecx);
- // Restore receiver.
- __ pop(edi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
-}
-
-
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- // Re-execute the code that was patched back to the young age when
- // the stub returns.
- __ sub(Operand(esp, 0), Immediate(5));
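- // The 5 subtracted from the return address is, presumably, the length of
- // the call sequence that invoked this stub (a call rel32 is five bytes),
- // so the ret below re-executes the code that was patched back to its
- // young form.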
- __ pushad();
- __ mov(eax, Operand(esp, 8 * kPointerSize));
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
- }
- __ popad();
- __ ret(0);
-}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
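-// For each code age C in CODE_AGE_LIST, the macro above expands into the
-// pair Generate_Make##C##CodeYoungAgainEvenMarking and
-// Generate_Make##C##CodeYoungAgainOddMarking, both of which simply
-// tail-call the common helper.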
-
-
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across notification, this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
- __ popad();
- // Tear down internal frame.
- }
-
- __ pop(MemOperand(esp, 0)); // Ignore state offset
- __ ret(0); // Return to IC Miss stub, continuation still on stack.
-}
-
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass deoptimization type to the runtime system.
- __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
-
- // Tear down internal frame.
- }
-
- // Get the full codegen state from the stack and untag it.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ SmiUntag(ecx);
-
- // Switch on the state.
- Label not_no_registers, not_tos_eax;
- __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
- __ j(not_equal, &not_no_registers, Label::kNear);
- __ ret(1 * kPointerSize); // Remove state.
-
- __ bind(&not_no_registers);
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- __ cmp(ecx, FullCodeGenerator::TOS_REG);
- __ j(not_equal, &not_tos_eax, Label::kNear);
- __ ret(2 * kPointerSize); // Remove state, eax.
-
- __ bind(&not_tos_eax);
- __ Abort("no cases left");
-}
-
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // TODO(kasperl): Do we need to save/restore the XMM registers too?
-
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ popad();
- __ ret(0);
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- Factory* factory = masm->isolate()->factory();
-
- // 1. Make sure we have at least one argument.
- { Label done;
- __ test(eax, eax);
- __ j(not_zero, &done);
- __ pop(ebx);
- __ push(Immediate(factory->undefined_value()));
- __ push(ebx);
- __ inc(eax);
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- Label slow, non_function;
- // 1 ~ return address.
- __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ JumpIfSmi(edi, &non_function);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
-
-
- // 3a. Patch the first argument if necessary when calling a function.
- Label shift_arguments;
- __ Set(edx, Immediate(0)); // indicate regular JS_FUNCTION
- { Label convert_to_object, use_global_receiver, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &shift_arguments);
-
- // Do not transform the receiver for natives (shared already in ebx).
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &shift_arguments);
-
- // Compute the receiver in non-strict mode.
- __ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
-
- // Call ToObject on the receiver if it is not an object, or use the
- // global object if it is null or undefined.
- __ JumpIfSmi(ebx, &convert_to_object);
- __ cmp(ebx, factory->null_value());
- __ j(equal, &use_global_receiver);
- __ cmp(ebx, factory->undefined_value());
- __ j(equal, &use_global_receiver);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &shift_arguments);
-
- __ bind(&convert_to_object);
-
- { // In order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(eax);
- __ push(eax);
-
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(ebx, eax);
- __ Set(edx, Immediate(0)); // restore
-
- __ pop(eax);
- __ SmiUntag(eax);
- }
-
- // Restore the function to edi.
- __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ jmp(&patch_receiver);
-
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(esi, kGlobalIndex));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, FieldOperand(ebx, kGlobalIndex));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
-
- __ bind(&patch_receiver);
- __ mov(Operand(esp, eax, times_4, 0), ebx);
-
- __ jmp(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ Set(edx, Immediate(1)); // indicate function proxy
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(equal, &shift_arguments);
- __ bind(&non_function);
- __ Set(edx, Immediate(2)); // indicate non-function
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- __ mov(Operand(esp, eax, times_4, 0), edi);
-
- // 4. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- __ bind(&shift_arguments);
- { Label loop;
- __ mov(ecx, eax);
- __ bind(&loop);
- __ mov(ebx, Operand(esp, ecx, times_4, 0));
- __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
- __ dec(ecx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(ebx); // Discard copy of return address.
- __ dec(eax); // One fewer argument (first argument is new receiver).
- }
-
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- { Label function, non_proxy;
- __ test(edx, edx);
- __ j(zero, &function);
- __ Set(ebx, Immediate(0));
- __ cmp(edx, Immediate(1));
- __ j(not_equal, &non_proxy);
-
- __ pop(edx); // return address
- __ push(edi); // re-add proxy object as additional argument
- __ push(edx);
- __ inc(eax);
- __ SetCallKind(ecx, CALL_AS_FUNCTION);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
- __ SmiUntag(ebx);
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ cmp(eax, ebx);
- __ j(not_equal,
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
-
- ParameterCount expected(0);
- __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
-
- __ push(Operand(ebp, kFunctionOffset)); // push this
- __ push(Operand(ebp, kArgumentsOffset)); // push arguments
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edi, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ mov(ecx, esp);
- __ sub(ecx, edi);
- // Make edx the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edx, eax);
- __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
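- // eax holds the smi-tagged length of the arguments array; shifting left
- // by kPointerSizeLog2 - kSmiTagSize (2 - 1 = 1 on ia32) converts the smi
- // directly into the byte size of the unrolled arguments.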
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(ebp, 4 * kPointerSize)); // push this
- __ push(eax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
-
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(eax); // limit
- __ push(Immediate(0)); // index
-
- // Get the receiver.
- __ mov(ebx, Operand(ebp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &push_receiver);
-
- Factory* factory = masm->isolate()->factory();
-
- // Do not transform the receiver for natives (shared already in ecx).
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- // Call ToObject on the receiver if it is not an object, or use the
- // global object if it is null or undefined.
- __ JumpIfSmi(ebx, &call_to_object);
- __ cmp(ebx, factory->null_value());
- __ j(equal, &use_global_receiver);
- __ cmp(ebx, factory->undefined_value());
- __ j(equal, &use_global_receiver);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &push_receiver);
-
- __ bind(&call_to_object);
- __ push(ebx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(ebx, eax);
- __ jmp(&push_receiver);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(esi, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(ebx);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ mov(ecx, Operand(ebp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
- __ call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(eax);
-
- // Update the index on the stack and in register eax.
- __ mov(ecx, Operand(ebp, kIndexOffset));
- __ add(ecx, Immediate(1 << kSmiTagSize));
- __ mov(Operand(ebp, kIndexOffset), ecx);
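- // The add of 1 << kSmiTagSize above increments the smi-tagged index by
- // one without untagging it.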
-
- __ bind(&entry);
- __ cmp(ecx, Operand(ebp, kLimitOffset));
- __ j(not_equal, &loop);
-
- // Invoke the function.
- Label call_proxy;
- __ mov(eax, ecx);
- ParameterCount actual(eax);
- __ SmiUntag(eax);
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &call_proxy);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- frame_scope.GenerateLeaveFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
-
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(edi); // add function proxy as last argument
- __ inc(eax);
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- // Leave internal frame.
- }
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
-}
-
-
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
- // backing store is allocated with this size and filled with the hole value.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
-
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- __ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(result, JSArray::kPropertiesOffset),
- factory->empty_fixed_array());
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
-
- // If no storage is requested for the elements array just set the empty
- // fixed array.
- if (initial_capacity == 0) {
- __ mov(FieldOperand(result, JSArray::kElementsOffset),
- factory->empty_fixed_array());
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ lea(scratch1, Operand(result, JSArray::kSize));
- __ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array
- // scratch2: start of next object
- __ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
- factory->fixed_array_map());
- __ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(initial_capacity)));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
- static const int kLoopUnfoldLimit = 4;
- if (initial_capacity <= kLoopUnfoldLimit) {
- // Use a scratch register here to have only one reloc info when unfolding
- // the loop.
- __ mov(scratch3, factory->the_hole_value());
- for (int i = 0; i < initial_capacity; i++) {
- __ mov(FieldOperand(scratch1,
- FixedArray::kHeaderSize + i * kPointerSize),
- scratch3);
- }
- } else {
- Label loop, entry;
- __ mov(scratch2, Immediate(initial_capacity));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(FieldOperand(scratch1,
- scratch2,
- times_pointer_size,
- FixedArray::kHeaderSize),
- factory->the_hole_value());
- __ bind(&entry);
- __ dec(scratch2);
- __ j(not_sign, &loop);
- }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end (see
- // below for when that is not the case). If the parameter fill_with_hole is
- // true, the allocated elements backing store is filled with the hole value;
- // otherwise it is left uninitialized. When the backing store is filled, the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array,
- Register elements_array_end,
- Register scratch,
- bool fill_with_hole,
- Label* gc_required) {
- ASSERT(scratch.is(edi)); // rep stos destination
- ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
- ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
-
- __ LoadInitialArrayMap(array_function, scratch,
- elements_array, fill_with_hole);
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested elements.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- array_size,
- REGISTER_VALUE_IS_SMI,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array: initial map
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
- Factory* factory = masm->isolate()->factory();
- __ mov(elements_array, factory->empty_fixed_array());
- __ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
- // Field JSArray::kElementsOffset is initialized later.
- __ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ lea(elements_array, Operand(result, JSArray::kSize));
- __ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
- // Initialize the fixed array. FixedArray length is stored as a smi.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
- factory->fixed_array_map());
- // For non-empty JSArrays the length of the FixedArray and the JSArray is the
- // same.
- __ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array: elements array
- if (fill_with_hole) {
- __ SmiUntag(array_size);
- __ lea(edi, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ mov(eax, factory->the_hole_value());
- __ cld();
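- // rep stos stores eax into ecx consecutive words starting at edi (cld
- // above clears the direction flag), which is why this path needs the
- // hole value in eax, the count in ecx and the destination in edi.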
- // Do not use rep stos when filling less than kRepStosThreshold
- // words.
- const int kRepStosThreshold = 16;
- Label loop, entry, done;
- __ cmp(ecx, kRepStosThreshold);
- __ j(below, &loop); // Note: ecx > 0.
- __ rep_stos();
- __ jmp(&done);
- __ bind(&loop);
- __ stos();
- __ bind(&entry);
- __ cmp(edi, elements_array_end);
- __ j(below, &loop);
- __ bind(&done);
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
- // If the Array cannot be constructed in native code, the runtime is called. This
-// function assumes the following state:
-// edi: constructor (built-in Array function)
-// eax: argc
-// esp[0]: return address
-// esp[4]: last argument
-// This function is used for both construct and normal calls of Array. Whether
-// it is a construct call or not is indicated by the construct_call parameter.
-// The only difference between handling a construct call and a normal call is
-// that for a construct call the constructor function in edi needs to be
-// preserved for entering the generic code. In both cases argc in eax needs to
-// be preserved.
-static void ArrayNativeCode(MacroAssembler* masm,
- bool construct_call,
- Label* call_generic_code) {
- Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
- empty_array, not_empty_array, finish, cant_transition_map, not_double;
-
- // Push the constructor and argc. No need to tag argc as a smi, as there will
- // be no garbage collection with this on the stack.
- int push_count = 0;
- if (construct_call) {
- push_count++;
- __ push(edi);
- }
- push_count++;
- __ push(eax);
-
- // Check for array construction with zero arguments.
- __ test(eax, eax);
- __ j(not_zero, &argc_one_or_more);
-
- __ bind(&empty_array);
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- edi,
- eax,
- ebx,
- ecx,
- edi,
- &prepare_generic_code_call);
- __ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ ret(kPointerSize);
-
- // Check for one argument. Bail out if argument is not smi or if it is
- // negative.
- __ bind(&argc_one_or_more);
- __ cmp(eax, 1);
- __ j(not_equal, &argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
- __ test(ecx, ecx);
- __ j(not_zero, &not_empty_array);
-
- // The single argument passed is zero, so we jump to the code above used to
- // handle the case of no arguments passed. To adapt the stack for that, we move
- // the return address and the pushed constructor (if pushed) one stack slot up,
- // thereby removing the passed argument. Argc is also on the stack - at the
- // bottom - and it needs to be changed from 1 to 0 to have the call into the
- // runtime system work in case a GC is required.
- for (int i = push_count; i > 0; i--) {
- __ mov(eax, Operand(esp, i * kPointerSize));
- __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
- }
- __ Drop(2); // Drop two stack slots.
- __ push(Immediate(0)); // Treat this as a call with argc of zero.
- __ jmp(&empty_array);
-
- __ bind(&not_empty_array);
- __ test(ecx, Immediate(kIntptrSignBit | kSmiTagMask));
- __ j(not_zero, &prepare_generic_code_call);
-
- // Handle construction of an empty array of a certain size. Get the size from
- // the stack and bail out if size is too large to actually allocate an elements
- // array.
- __ cmp(ecx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
- __ j(greater_equal, &prepare_generic_code_call);
-
- // ecx: array_size (smi)
- // edi: constructor
- // esp[0]: argc (cannot be 0 here)
- // esp[4]: constructor (only if construct_call)
- // esp[8]: return address
- // esp[C]: argument
- AllocateJSArray(masm,
- edi,
- ecx,
- ebx,
- eax,
- edx,
- edi,
- true,
- &prepare_generic_code_call);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->array_function_native(), 1);
- __ mov(eax, ebx);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ ret(2 * kPointerSize);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTag(eax); // Convert argc to a smi.
- // eax: array_size (smi)
- // edi: constructor
- // esp[0] : argc
- // esp[4]: constructor (only if construct_call)
- // esp[8] : return address
- // esp[C] : last argument
- AllocateJSArray(masm,
- edi,
- eax,
- ebx,
- ecx,
- edx,
- edi,
- false,
- &prepare_generic_code_call);
- __ IncrementCounter(counters->array_function_native(), 1);
- __ push(ebx);
- __ mov(ebx, Operand(esp, kPointerSize));
- // ebx: argc
- // edx: elements_array_end (untagged)
- // esp[0]: JSArray
- // esp[4]: argc
- // esp[8]: constructor (only if construct_call)
- // esp[12]: return address
- // esp[16]: last argument
-
- // Location of the last argument
- int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
- __ lea(edi, Operand(esp, last_arg_offset));
-
- // Location of the first array element (parameter fill_with_hole to
- // AllocateJSArray is false, so the FixedArray is returned in ecx).
- __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
-
- Label has_non_smi_element;
-
- // ebx: argc
- // edx: location of the first array element
- // edi: location of the last argument
- // esp[0]: JSArray
- // esp[4]: argc
- // esp[8]: constructor (only if construct_call)
- // esp[12]: return address
- // esp[16]: last argument
- Label loop, entry;
- __ mov(ecx, ebx);
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(eax, &has_non_smi_element);
- }
- __ mov(Operand(edx, 0), eax);
- __ add(edx, Immediate(kPointerSize));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Remove caller arguments from the stack and return.
- // ebx: argc
- // esp[0]: JSArray
- // esp[4]: argc
- // esp[8]: constructor (only if construct_call)
- // esp[12]: return address
- // esp[16]: last argument
- __ bind(&finish);
- __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
- __ pop(eax);
- __ pop(ebx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size,
- last_arg_offset - kPointerSize));
- __ jmp(ecx);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(eax,
- masm->isolate()->factory()->heap_number_map(),
- &not_double,
- DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- // Throw away the array that's only been partially constructed.
- __ pop(eax);
- __ UndoAllocationInNewSpace(eax);
- __ jmp(&prepare_generic_code_call);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- __ mov(ebx, Operand(esp, 0));
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- edi,
- eax,
- &cant_transition_map);
- __ mov(FieldOperand(ebx, HeapObject::kMapOffset), edi);
- __ RecordWriteField(ebx, HeapObject::kMapOffset, edi, eax,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Prepare to re-enter the loop
- __ lea(edi, Operand(esp, last_arg_offset));
-
- // Finish the array initialization loop.
- Label loop2;
- __ bind(&loop2);
- __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
- __ mov(Operand(edx, 0), eax);
- __ add(edx, Immediate(kPointerSize));
- __ dec(ecx);
- __ j(greater_equal, &loop2);
- __ jmp(&finish);
-
- // Restore argc and constructor before running the generic code.
- __ bind(&prepare_generic_code_call);
- __ pop(eax);
- if (construct_call) {
- __ pop(edi);
- }
- __ jmp(call_generic_code);
-}
-
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the InternalArray function.
- __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin InternalArray function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // A smi check will catch both a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for InternalArray function");
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for InternalArray function");
- }
-
- // Run the native code for the InternalArray function called as a normal
- // function.
- ArrayNativeCode(masm, false, &generic_array_code);
-
- // Jump to the generic internal array code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ jmp(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the Array function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // A smi check will catch both a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for Array function");
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, false, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ jmp(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- ebx : type info cell
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- if (FLAG_debug_code) {
- // The array construct code is only set for the global and natives
- // builtin Array functions, which always have maps.
-
- // Initial map for the builtin Array function should be a map.
- __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // A smi check will catch both a NULL and a Smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected initial map for Array function");
- __ CmpObjectType(ecx, MAP_TYPE, ecx);
- __ Assert(equal, "Unexpected initial map for Array function");
-
- if (FLAG_optimize_constructed_arrays) {
- // We should either have undefined in ebx or a valid JSGlobalPropertyCell.
- Label okay_here;
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- Handle<Map> global_property_cell_map(
- masm->isolate()->heap()->global_property_cell_map());
- __ cmp(ebx, Immediate(undefined_sentinel));
- __ j(equal, &okay_here);
- __ cmp(FieldOperand(ebx, 0), Immediate(global_property_cell_map));
- __ Assert(equal, "Expected property cell in register ebx");
- __ bind(&okay_here);
- }
- }
-
- if (FLAG_optimize_constructed_arrays) {
- Label not_zero_case, not_one_case;
- __ test(eax, eax);
- __ j(not_zero, &not_zero_case);
- ArrayNoArgumentConstructorStub no_argument_stub;
- __ TailCallStub(&no_argument_stub);
-
- __ bind(&not_zero_case);
- __ cmp(eax, 1);
- __ j(greater, &not_one_case);
- ArraySingleArgumentConstructorStub single_argument_stub;
- __ TailCallStub(&single_argument_stub);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub n_argument_stub;
- __ TailCallStub(&n_argument_stub);
- } else {
- Label generic_constructor;
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, true, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : number of arguments
- // -- edi : constructor function
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1);
-
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ Assert(equal, "Unexpected String function");
- }
-
- // Load the first argument into eax and get rid of the rest
- // (including the receiver).
- Label no_arguments;
- __ test(eax, eax);
- __ j(zero, &no_arguments);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- __ pop(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ push(ecx);
- __ mov(eax, ebx);
-
- // Look up the argument in the number-to-string cache.
- Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- eax, // Input.
- ebx, // Result.
- ecx, // Scratch 1.
- edx, // Scratch 2.
- false, // Input is known to be smi?
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1);
- __ bind(&argument_is_string);
- // ----------- S t a t e -------------
- // -- ebx : argument converted to string
- // -- edi : constructor function
- // -- esp[0] : return address
- // -----------------------------------
-
- // Allocate a JSValue and put the tagged pointer into eax.
- Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- eax, // Result.
- ecx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
-
- // Set the map.
- __ LoadGlobalFunctionInitialMap(edi, ecx);
- if (FLAG_debug_code) {
- __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
- JSValue::kSize >> kPointerSizeLog2);
- __ Assert(equal, "Unexpected string wrapper instance size");
- __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
- __ Assert(equal, "Unexpected unused properties of string wrapper");
- }
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
-
- // Set properties and elements.
- Factory* factory = masm->isolate()->factory();
- __ Set(ecx, Immediate(factory->empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
-
- // Set the value.
- __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
-
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-
- // We're done. Return.
- __ ret(0);
-
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &convert_argument);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
- __ j(NegateCondition(is_string), &convert_argument);
- __ mov(ebx, eax);
- __ IncrementCounter(counters->string_ctor_string_value(), 1);
- __ jmp(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into ebx.
- __ bind(&convert_argument);
- __ IncrementCounter(counters->string_ctor_conversions(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edi); // Preserve the function.
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(edi);
- }
- __ mov(ebx, eax);
- __ jmp(&argument_is_string);
-
- // Load the empty string into ebx, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ Set(ebx, Immediate(factory->empty_string()));
- __ pop(ecx);
- __ lea(esp, Operand(esp, kPointerSize));
- __ push(ecx);
- __ jmp(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(ebx);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- }
- __ ret(0);
-}
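
The pop / lea / push sequence used twice in the string constructor above is the standard ia32 idiom for dropping stack slots while keeping the return address on top. A minimal C++ model of the stack arithmetic, with an array standing in for the machine stack (illustrative only, not V8 code):

    #include <cassert>

    int main() {
      // Lower index == closer to the top of the stack, as on ia32.
      int stack[8] = {0};
      int sp = 2;
      stack[2] = 0xBEEF;  // return address (esp[0])
      stack[3] = 11;      // the single argument (esp[4])
      stack[4] = 22;      // the receiver (esp[8])
      int argc = 1;

      int ret = stack[sp++];  // __ pop(ecx)
      sp += argc + 1;         // __ lea(esp, ...): skip the arguments and receiver
      stack[--sp] = ret;      // __ push(ecx)

      assert(stack[sp] == 0xBEEF);  // return address back on top, operands gone
      return 0;
    }
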
-
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(ebp);
- __ mov(ebp, esp);
-
- // Store the arguments adaptor context sentinel.
- __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Push the function on the stack.
- __ push(edi);
-
- // Preserve the number of arguments on the stack. Must preserve eax,
- // ebx and ecx because these registers are used when copying the
- // arguments and the receiver.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(edi, Operand(eax, eax, times_1, kSmiTag));
- __ push(edi);
-}
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // Retrieve the number of arguments from the stack.
- __ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Leave the frame.
- __ leave();
-
- // Remove caller arguments from the stack.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
- __ push(ecx);
-}
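
The adaptor-frame helpers above lean on the ia32 smi encoding (kSmiTag == 0, kSmiTagSize == 1): EnterArgumentsAdaptorFrame tags argc with a single lea (eax + eax + 0), and LeaveArgumentsAdaptorFrame scales the still-tagged length by times_2, which untags and converts to a byte offset in one step. A small C++ sketch of that arithmetic (the helper names here are ours, not the V8 macros):

    #include <cassert>
    #include <cstdint>

    static int32_t SmiTag(int32_t v)   { return v << 1; }  // lea(edi, Operand(eax, eax, times_1, kSmiTag))
    static int32_t SmiUntag(int32_t s) { return s >> 1; }

    int main() {
      const int32_t kPointerSize = 4;  // ia32
      int32_t argc = 3;
      int32_t tagged = SmiTag(argc);
      assert(tagged == 6 && (tagged & 1) == 0);  // tag bit stays clear

      // lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)): a smi times 2
      // equals the untagged value times kPointerSize, so one lea pops the
      // whole argument area (the extra kPointerSize skips the receiver).
      assert(tagged * 2 == SmiUntag(tagged) * kPointerSize);
      return 0;
    }
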
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : actual number of arguments
- // -- ebx : expected number of arguments
- // -- ecx : call kind information
- // -- edx : code entry to call
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments;
- __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
-
- Label enough, too_few;
- __ cmp(eax, ebx);
- __ j(less, &too_few);
- __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
- __ j(equal, &dont_adapt_arguments);
-
- { // Enough parameters: Actual >= expected.
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all expected arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(eax, Operand(ebp, eax, times_4, offset));
- __ mov(edi, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ inc(edi);
- __ push(Operand(eax, 0));
- __ sub(eax, Immediate(kPointerSize));
- __ cmp(edi, ebx);
- __ j(less, &copy);
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(edi, Operand(ebp, eax, times_4, offset));
- // ebx = expected - actual.
- __ sub(ebx, eax);
- // eax = -actual - 1
- __ neg(eax);
- __ sub(eax, Immediate(1));
-
- Label copy;
- __ bind(&copy);
- __ inc(eax);
- __ push(Operand(edi, 0));
- __ sub(edi, Immediate(kPointerSize));
- __ test(eax, eax);
- __ j(not_zero, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ bind(&fill);
- __ inc(eax);
- __ push(Immediate(masm->isolate()->factory()->undefined_value()));
- __ cmp(eax, ebx);
- __ j(less, &fill);
- }
-
- // Call the entry point.
- __ bind(&invoke);
- // Restore function pointer.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ call(edx);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Leave frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ ret(0);
-
- // -------------------------------------------
-  // Don't adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ jmp(edx);
-}
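
Stripped of the register choreography, the trampoline enforces one rule: the callee's frame always sees exactly the expected number of arguments, padded with undefined when the caller supplied too few. A hedged C++ model of that rule (std::string stands in for tagged values, purely for illustration):

    #include <string>
    #include <vector>

    // Mirrors the "enough"/"too few" copy loops and the &fill loop above.
    // Actual arguments beyond the expected count stay in the caller's frame
    // and are not copied.
    std::vector<std::string> AdaptArguments(const std::vector<std::string>& actual,
                                            size_t expected) {
      std::vector<std::string> adapted;
      for (size_t i = 0; i < expected; ++i) {
        adapted.push_back(i < actual.size() ? actual[i] : "undefined");
      }
      return adapted;
    }

The original actual count remains recorded in the adaptor frame itself, which is how the arguments object can later observe more arguments than the function declared.
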
-
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- CpuFeatures::TryForceFeatureScope scope(SSE2);
- if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) {
- __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
- return;
- }
-
- // Get the loop depth of the stack guard check. This is recorded in
- // a test(eax, depth) instruction right after the call.
- Label stack_check;
- __ mov(ebx, Operand(esp, 0)); // return address
- if (FLAG_debug_code) {
- __ cmpb(Operand(ebx, 0), Assembler::kTestAlByte);
- __ Assert(equal, "test eax instruction not found after loop stack check");
- }
- __ movzx_b(ebx, Operand(ebx, 1)); // depth
-
- // Get the loop nesting level at which we allow OSR from the
- // unoptimized code and check if we want to do OSR yet. If not we
- // should perform a stack guard check so we can get interrupts while
- // waiting for on-stack replacement.
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
- __ cmpb(ebx, FieldOperand(ecx, Code::kAllowOSRAtLoopNestingLevelOffset));
- __ j(greater, &stack_check);
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- }
-
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- Label skip;
- __ cmp(eax, Immediate(Smi::FromInt(-1)));
- __ j(not_equal, &skip, Label::kNear);
- __ ret(0);
-
- // Insert a stack guard check so that if we decide not to perform
- // on-stack replacement right away, the function calling this stub can
- // still be interrupted.
- __ bind(&stack_check);
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ TailCallStub(&stub);
- if (FLAG_debug_code) {
- __ Abort("Unreachable code: returned from tail call.");
- }
- __ bind(&ok);
- __ ret(0);
-
- __ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiUntag(eax);
- __ push(eax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
-}
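
The depth read at the top works because the unoptimized code emits a test(eax, depth) instruction immediately after the call to this builtin; for a small immediate the ia32 assembler encodes that as "test al, imm8", opcode 0xA8 (Assembler::kTestAlByte), with the depth as the immediate byte. A standalone decoding sketch (our helper, not the V8 API):

    #include <cstdint>

    // The return address points at "test al, imm8": byte 0 is the opcode the
    // cmpb above verifies, byte 1 is the recorded loop depth.
    uint8_t LoopDepthAtReturnSite(const uint8_t* return_address) {
      if (return_address[0] != 0xA8) return 0;  // not a stack-check site
      return return_address[1];                 // __ movzx_b(ebx, Operand(ebx, 1))
    }
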
-
-
-#undef __
-}
-} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc b/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
deleted file mode 100644
index 44df82a..0000000
--- a/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
+++ /dev/null
@@ -1,7936 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "isolate.h"
-#include "jsregexp.h"
-#include "regexp-macro-assembler.h"
-#include "runtime.h"
-#include "stub-cache.h"
-#include "codegen.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { eax, ebx, ecx, edx };
- descriptor->register_param_count_ = 4;
- descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { edx, ecx };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { eax, ebx };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-}
-
-
-static void InitializeArrayConstructorDescriptor(Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // register state
- // edi -- constructor function
- // ebx -- type info cell with elements kind
- // eax -- number of arguments to the constructor function
- static Register registers[] = { edi, ebx };
- descriptor->register_param_count_ = 2;
-  // The stack parameter count (held in eax) covers the constructor pointer
-  // and a single argument.
- descriptor->stack_parameter_count_ = &eax;
- descriptor->register_params_ = registers;
- descriptor->extra_expression_stack_count_ = 1;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ArrayConstructor_StubFailure);
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label check_heap_number, call_builtin;
- __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
- __ ret(0);
-
- __ bind(&check_heap_number);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ebx, Immediate(factory->heap_number_map()));
- __ j(not_equal, &call_builtin, Label::kNear);
- __ ret(0);
-
- __ bind(&call_builtin);
- __ pop(ecx); // Pop return address.
- __ push(eax);
- __ push(ecx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in esi.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1);
-
- // Get the function info from the stack.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
-
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
- __ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- Factory* factory = masm->isolate()->factory();
- __ mov(ebx, Immediate(factory->empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
- Immediate(factory->the_hole_value()));
- __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
- __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
- __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ test(ebx, ebx);
- __ j(not_zero, &check_optimized, Label::kNear);
- }
- __ bind(&install_unoptimized);
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
- Immediate(factory->undefined_value()));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
-
-  // ecx holds the native context, ebx points to a fixed array of 3-element
-  // entries (native context, optimized code, literals).
-  // The map is never empty, so check the first entry unconditionally.
- Label install_optimized;
- // Speculatively move code object into edx.
- __ mov(edx, FieldOperand(ebx, FixedArray::kHeaderSize + kPointerSize));
- __ cmp(ecx, FieldOperand(ebx, FixedArray::kHeaderSize));
- __ j(equal, &install_optimized);
-
- // Iterate through the rest of map backwards. edx holds an index as a Smi.
- Label loop;
- Label restore;
- __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ bind(&loop);
- // Do not double check first entry.
- __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ j(equal, &restore);
- __ sub(edx, Immediate(Smi::FromInt(
- SharedFunctionInfo::kEntryLength))); // Skip an entry.
- __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
- __ j(not_equal, &loop, Label::kNear);
- // Hit: fetch the optimized code.
- __ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
-
- // TODO(fschneider): Idea: store proper code pointers in the optimized code
- // map and either unmangle them on marking or do nothing as the whole map is
- // discarded on major GC anyway.
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
-
- // Now link a function into a list of optimized functions.
- __ mov(edx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), edx);
- // No need for write barrier as JSFunction (eax) is in the new space.
-
- __ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
- // Store JSFunction (eax) into edx before issuing write barrier as
- // it clobbers all the registers passed.
- __ mov(edx, eax);
- __ RecordWriteContextSlot(
- ecx,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- edx,
- ebx,
- kDontSaveFPRegs);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&restore);
- // Restore SharedFunctionInfo into edx.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ jmp(&install_unoptimized);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(ecx); // Temporarily remove return address.
- __ pop(edx);
- __ push(esi);
- __ push(edx);
- __ push(Immediate(factory->false_value()));
- __ push(ecx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
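
The optimized-code-map walk above amounts to a reverse linear search over fixed-size entries, with the first entry probed speculatively before the loop. A simplified C++ model (the Entry struct is illustrative; V8 stores these as flat FixedArray slots, SharedFunctionInfo::kEntryLength slots per entry):

    #include <cstddef>
    #include <vector>

    struct Entry {  // one 3-slot map entry, as described above
      const void* native_context;
      const void* code;
      const void* literals;
    };

    // Search from the end back towards the first entry; a miss means the
    // caller installs the unoptimized code from the SharedFunctionInfo.
    const void* LookupOptimizedCode(const std::vector<Entry>& map,
                                    const void* native_context) {
      for (size_t i = map.size(); i-- > 0; ) {
        if (map[i].native_context == native_context) return map[i].code;
      }
      return nullptr;
    }
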
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Set up the object header.
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- factory->function_context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // Set up the fixed slots.
- __ Set(ebx, Immediate(0)); // Set to NULL.
- __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
- __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
- __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
-
- // Copy the global object from the previous context.
- __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), ebx);
-
- // Copy the qml global object from the previous context.
- __ mov(ebx,
- Operand(esi, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)),
- ebx);
-
-
- // Initialize the rest of the slots to undefined.
- __ mov(ebx, factory->undefined_value());
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
- }
-
- // Return and remove the on-stack parameter.
- __ mov(esi, eax);
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + (1 * kPointerSize)]: function
- // [esp + (2 * kPointerSize)]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- eax, ebx, ecx, &gc, TAG_OBJECT);
-
- // Get the function or sentinel from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
- // Get the serialized scope info from the stack.
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
-
- // Set up the object header.
- Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- factory->block_context_map());
- __ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ cmp(ecx, 0);
- __ Assert(equal, message);
- }
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
- __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots.
- __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
- __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
- __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
-
- // Copy the global object from the previous context.
- __ mov(ebx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
- __ mov(ContextOperand(eax, Context::GLOBAL_OBJECT_INDEX), ebx);
-
- // Copy the qml global object from the previous context.
- __ mov(ebx, ContextOperand(esi, Context::QML_GLOBAL_OBJECT_INDEX));
- __ mov(ContextOperand(eax, Context::QML_GLOBAL_OBJECT_INDEX), ebx);
-
- // Initialize the rest of the slots to the hole value.
- if (slots_ == 1) {
- __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
- factory->the_hole_value());
- } else {
- __ mov(ebx, factory->the_hole_value());
- for (int i = 0; i < slots_; i++) {
- __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
- }
- }
-
- // Return and remove the on-stack parameters.
- __ mov(esi, eax);
- __ ret(2 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- AllocationSiteMode allocation_site_mode,
- Label* fail) {
- // Registers on entry:
- //
- // ecx: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- int size = JSArray::kSize;
- int allocation_info_start = size;
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- size += AllocationSiteInfo::kSize;
- }
- size += elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- AllocationFlags flags = TAG_OBJECT;
- if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
- flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
- }
- __ AllocateInNewSpace(size, eax, ebx, edx, fail, flags);
-
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ mov(FieldOperand(eax, allocation_info_start),
- Immediate(Handle<Map>(masm->isolate()->heap()->
- allocation_site_info_map())));
- __ mov(FieldOperand(eax, allocation_info_start + kPointerSize), ecx);
- }
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ lea(edx, Operand(eax, JSArray::kSize + AllocationSiteInfo::kSize));
- } else {
- __ lea(edx, Operand(eax, JSArray::kSize));
- }
- __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
-
- // Copy the elements array.
- if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- } else {
- ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
- int i;
- for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- while (i < elements_size) {
- __ fld_d(FieldOperand(ecx, i));
- __ fstp_d(FieldOperand(edx, i));
- i += kDoubleSize;
- }
- ASSERT(i == elements_size);
- }
- }
-}
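
The helper's size computation packs up to three consecutive pieces into one AllocateInNewSpace call: the JSArray header, an optional AllocationSiteInfo, and the elements store. A back-of-the-envelope version with assumed ia32 sizes (the constants below are illustrative, not the values from the V8 headers):

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;                            // ia32
      const int kJSArraySize = 4 * kPointerSize;             // map, properties, elements, length
      const int kAllocationSiteInfoSize = 2 * kPointerSize;  // map + payload
      const int kFixedArrayHeaderSize = 2 * kPointerSize;    // map + length

      int length = 3;
      bool track_allocation_site = true;
      int elements_size = kFixedArrayHeaderSize + length * kPointerSize;
      int size = kJSArraySize +
                 (track_allocation_site ? kAllocationSiteInfoSize : 0) +
                 elements_size;
      // One allocation, therefore one new-space limit check.
      std::printf("allocating %d bytes in one shot\n", size);
      return 0;
    }
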
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + kPointerSize]: constant elements.
- // [esp + (2 * kPointerSize)]: literal index.
- // [esp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into ecx and check if we need to create a
- // boilerplate.
- __ mov(ecx, Operand(esp, 3 * kPointerSize));
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kPointerSize == 4);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ecx, factory->undefined_value());
- Label slow_case;
- __ j(equal, &slow_case);
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- // ecx is boilerplate object.
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ CheckMap(ebx, factory->fixed_cow_array_map(),
- &check_fast_elements, DONT_DO_SMI_CHECK);
- GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&check_fast_elements);
- __ CheckMap(ebx, factory->fixed_array_map(),
- &double_elements, DONT_DO_SMI_CHECK);
- GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Handle<Map> expected_map;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map = factory->fixed_array_map();
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map = factory->fixed_double_array_map();
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map = factory->fixed_cow_array_map();
- }
- __ push(ecx);
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
- __ Assert(equal, message);
- __ pop(ecx);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode,
- allocation_site_mode_,
- &slow_case);
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// The stub expects its argument on the stack and returns its result in tos_:
-// zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- Label patch;
- Factory* factory = masm->isolate()->factory();
- const Register argument = eax;
- const Register map = edx;
-
- if (!types_.IsEmpty()) {
- __ mov(argument, Operand(esp, 1 * kPointerSize));
- }
-
- // undefined -> false
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
-    // Smis: 0 -> false, all others -> true.
- Label not_smi;
- __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ mov(tos_, argument);
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_smi);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(argument, &patch, Label::kNear);
- }
-
- if (types_.NeedsMap()) {
- __ mov(map, FieldOperand(argument, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- // Undetectable -> false.
- Label not_undetectable;
- __ j(zero, &not_undetectable, Label::kNear);
- __ Set(tos_, Immediate(0));
- __ ret(1 * kPointerSize);
- __ bind(&not_undetectable);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // spec object -> true.
- Label not_js_object;
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, &not_js_object, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, Immediate(1));
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_js_object);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ mov(tos_, FieldOperand(argument, String::kLengthOffset));
- __ ret(1 * kPointerSize); // the string length is OK as the return value
- __ bind(&not_string);
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number, false_result;
- __ cmp(map, factory->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ fldz();
- __ fld_d(FieldOperand(argument, HeapNumber::kValueOffset));
- __ FCmp();
- __ j(zero, &false_result, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, Immediate(1));
- }
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ Set(tos_, Immediate(0));
- __ ret(1 * kPointerSize);
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
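
The ladder above implements the ES5 ToBoolean table one type at a time. The two cases with non-obvious machine code are heap numbers (the fldz/FCmp compare) and strings (the stub returns the smi length directly, since a zero length already is the "false" value); in plain C++ the semantics are simply (illustrative of the semantics only, not the stub's calling convention):

    #include <cassert>
    #include <cmath>
    #include <string>

    // heap number -> false iff +0, -0 or NaN.
    bool ToBooleanNumber(double d) { return d != 0.0 && !std::isnan(d); }

    // string -> false iff empty.
    bool ToBooleanString(const std::string& s) { return !s.empty(); }

    int main() {
      assert(!ToBooleanNumber(0.0) && !ToBooleanNumber(-0.0));
      assert(!ToBooleanNumber(std::nan("")) && ToBooleanNumber(42.0));
      assert(!ToBooleanString("") && ToBooleanString("x"));
      return 0;
    }
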
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- __ pushad();
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(Operand(esp, i * kDoubleSize), reg);
- }
- }
- const int argument_count = 1;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count, ecx);
- __ mov(Operand(esp, 0 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- argument_count);
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(reg, Operand(esp, i * kDoubleSize));
- }
- __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- }
- __ popad();
- __ ret(0);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- const Register argument = eax;
- if (types_.Contains(type)) {
-    // If we see an expected oddball, return its ToBoolean value in tos_.
- Label different_value;
- __ CompareRoot(argument, value);
- __ j(not_equal, &different_value, Label::kNear);
- if (!result) {
- // If we have to return zero, there is no way around clearing tos_.
- __ Set(tos_, Immediate(0));
- } else if (!tos_.is(argument)) {
- // If we have to return non-zero, we can re-use the argument if it is the
- // same register as the result, because we never see Smi-zero here.
- __ Set(tos_, Immediate(1));
- }
- __ ret(1 * kPointerSize);
- __ bind(&different_value);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Get return address, operand is now on top of stack.
- __ push(Immediate(Smi::FromInt(tos_.code())));
- __ push(Immediate(Smi::FromInt(types_.ToByte())));
- __ push(ecx); // Push return address.
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- enum ArgLocation {
- ARGS_ON_STACK,
- ARGS_IN_REGISTERS
- };
-
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
-  // operand in register |number|. Returns operand as floating point number
- // on FPU stack.
- static void LoadFloatOperand(MacroAssembler* masm, Register number);
-
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
- // Returns operands as floating point numbers on FPU stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location = ARGS_ON_STACK);
-
- // Similar to LoadFloatOperand but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
-
- // Test if operands are smi or number objects (fp). Requirements:
- // operand_1 in eax, operand_2 in edx; falls through on float
- // operands, jumps to the non_float label otherwise.
- static void CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch);
-
- // Takes the operands in edx and eax and loads them as integers in eax
- // and ecx.
- static void LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* operand_conversion_failure);
-
- // Assumes that operands are smis or heap numbers and loads them
- // into xmm0 and xmm1. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm);
-
- // Test if operands are numbers (smi or HeapNumber objects), and load
- // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
- // either operand is not a number. Operands are in edx and eax.
- // Leaves operands unchanged.
- static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
-
- // Similar to LoadSSE2Operands but assumes that both operands are smis.
- // Expects operands in edx, eax.
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
-
- // Checks that the two floating point numbers loaded into xmm0 and xmm1
- // have int32 values.
- static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
- Label* non_int32,
- Register scratch);
-
- // Checks that |operand| has an int32 value. If |int32_result| is different
- // from |scratch|, it will contain that int32 value.
- static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch);
-};
-
-
-// Get the integer part of a heap number. Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
-// trashed registers.
-static void IntegerConvert(MacroAssembler* masm,
- Register source,
- bool use_sse3,
- Label* conversion_failure) {
- ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
- Label done, right_exponent, normal_exponent;
- Register scratch = ebx;
- Register scratch2 = edi;
- // Get exponent word.
- __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
- // Get exponent alone in scratch2.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kExponentMask);
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, ecx);
- // If the exponent is above 83, the number contains no significant
- // bits in the range 0..2^31, so the result is zero.
- static const uint32_t kResultIsZeroExponent = 83;
- __ cmp(scratch2, Immediate(kResultIsZeroExponent));
- __ j(above, &done);
- if (use_sse3) {
- CpuFeatures::Scope scope(SSE3);
- // Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent = 63;
- __ cmp(scratch2, Immediate(kTooBigExponent));
- __ j(greater_equal, conversion_failure);
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
- // Reserve space for 64 bit answer.
- __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint.
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
- } else {
- // Check whether the exponent matches a 32 bit signed int that cannot be
- // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
-    // exponent is 30 (after the bias is removed). This is the exponent we are
-    // fastest at and also the highest exponent we can handle here.
- const uint32_t non_smi_exponent = 30;
- __ cmp(scratch2, Immediate(non_smi_exponent));
- // If we have a match of the int32-but-not-Smi exponent then skip some
- // logic.
- __ j(equal, &right_exponent, Label::kNear);
- // If the exponent is higher than that then go to slow case. This catches
- // numbers that don't fit in a signed int32, infinities and NaNs.
- __ j(less, &normal_exponent, Label::kNear);
-
- {
- // Handle a big exponent. The only reason we have this code is that the
- // >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent = 31;
- __ cmp(scratch2, Immediate(big_non_smi_exponent));
- __ j(not_equal, conversion_failure);
- // We have the big exponent, typically from >>>. This means the number is
- // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
- __ mov(scratch2, scratch);
- __ and_(scratch2, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch2, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
-      // take. We just OR'ed in the implicit bit, which took care of one, and
-      // we want to use the full unsigned range, so we subtract 1 bit from the
- // shift distance.
- const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
- __ shl(scratch2, big_shift_distance);
- // Get the second half of the double.
- __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 21 bits to get the most significant 11 bits or the low
- // mantissa word.
- __ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, scratch2);
- // We have the answer in ecx, but we may need to negate it.
- __ test(scratch, scratch);
- __ j(positive, &done, Label::kNear);
- __ neg(ecx);
- __ jmp(&done, Label::kNear);
- }
-
- __ bind(&normal_exponent);
- // Exponent word in scratch, exponent in scratch2. Zero in ecx.
- // We know that 0 <= exponent < 30.
- __ mov(ecx, Immediate(30));
- __ sub(ecx, scratch2);
-
- __ bind(&right_exponent);
- // Here ecx is the shift, scratch is the exponent word.
- // Get the top bits of the mantissa.
- __ and_(scratch, HeapNumber::kMantissaMask);
- // Put back the implicit 1.
- __ or_(scratch, 1 << HeapNumber::kExponentShift);
- // Shift up the mantissa bits to take up the space the exponent used to
-    // take. We have kExponentShift + 1 significant bits in the low end of the
- // word. Shift them to the top bits.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ shl(scratch, shift_distance);
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the most significant 10 bits or the low
- // mantissa word.
- __ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, scratch);
- // Move down according to the exponent.
- __ shr_cl(scratch2);
- // Now the unsigned answer is in scratch2. We need to move it to ecx and
- // we may need to fix the sign.
- Label negative;
- __ xor_(ecx, ecx);
- __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
- __ j(greater, &negative, Label::kNear);
- __ mov(ecx, scratch2);
- __ jmp(&done, Label::kNear);
- __ bind(&negative);
- __ sub(ecx, scratch2);
- }
- __ bind(&done);
-}
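
The non-SSE3 path above is a hand-rolled truncating double-to-int32 conversion. A portable C++ model of the same bit manipulation, covering only the 0 <= exponent <= 30 path (the function and its hard-coded IEEE-754 constants are ours; the stub works from the HeapNumber word offsets instead):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int32_t DoubleToInt32Bits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      uint32_t hi = static_cast<uint32_t>(bits >> 32);   // kExponentOffset word
      uint32_t lo = static_cast<uint32_t>(bits);         // kMantissaOffset word
      int32_t exponent = static_cast<int32_t>((hi >> 20) & 0x7ff) - 1023;
      if (exponent < 0 || exponent > 30) return 0;       // outside the modelled range
      uint64_t mantissa = (static_cast<uint64_t>(hi & 0xfffff) << 32) | lo;
      mantissa |= static_cast<uint64_t>(1) << 52;        // put back the implicit 1
      int32_t result = static_cast<int32_t>(mantissa >> (52 - exponent));
      return (hi & 0x80000000u) ? -result : result;      // fix the sign last
    }

    int main() {
      assert(DoubleToInt32Bits(42.75) == 42);
      assert(DoubleToInt32Bits(-5.0) == -5);
      assert(DoubleToInt32Bits(0.5) == 0);
      return 0;
    }
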
-
-
-// Uses SSE2 to convert the heap number in |source| to an integer. Jumps to
-// |conversion_failure| if the heap number did not contain an int32 value.
-// Result is in ecx. Trashes ebx, xmm0, and xmm1.
-static void ConvertHeapNumberToInt32(MacroAssembler* masm,
- Register source,
- Label* conversion_failure) {
- __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset));
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, conversion_failure, xmm0, ecx, ebx, xmm1);
-}
-
-
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
-
- __ push(eax); // the operand
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(mode_)));
- __ push(Immediate(Smi::FromInt(operand_type_)));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
- Label::kNear, Label::kNear, Label::kNear);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&non_smi);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* undo,
- Label* slow,
- Label::Distance non_smi_near,
- Label::Distance undo_near,
- Label::Distance slow_near) {
- // Check whether the value is a smi.
- __ JumpIfNotSmi(eax, non_smi, non_smi_near);
-
- // We can't handle -0 with smis, so use a type transition for that case.
- __ test(eax, eax);
- __ j(zero, slow, slow_near);
-
- // Try optimistic subtraction '0 - value', saving operand in eax for undo.
- __ mov(edx, eax);
- __ Set(eax, Immediate(0));
- __ sub(eax, edx);
- __ j(overflow, undo, undo_near);
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(
- MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near) {
- // Check whether the value is a smi.
- __ JumpIfNotSmi(eax, non_smi, non_smi_near);
-
- // Flip bits and revert inverted smi-tag.
- __ not_(eax);
- __ and_(eax, ~kSmiTagMask);
- __ ret(0);
-}
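
The two instructions above compute BIT_NOT without ever untagging: for a tagged smi x = v << 1, ~x equals (~v << 1) | 1, so clearing the tag bit with and_(eax, ~kSmiTagMask) leaves ~v correctly tagged. A quick C++ check of the identity (assuming kSmiTag == 0 and kSmiTagMask == 1, as on ia32):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t kSmiTagMask = 1;
      for (int32_t v = -100; v <= 100; ++v) {
        int32_t tagged = v << 1;                  // smi encoding
        int32_t result = ~tagged & ~kSmiTagMask;  // not_(eax); and_(eax, ~kSmiTagMask)
        assert(result == ((~v) << 1));            // ~v, still a valid smi
      }
      return 0;
    }
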
-
-
-void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
- __ mov(eax, edx);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(
- MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, slow);
-
- if (mode_ == UNARY_OVERWRITE) {
- __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
- Immediate(HeapNumber::kSignMask)); // Flip sign.
- } else {
- __ mov(edx, eax);
- // edx: operand
-
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated, Label::kNear);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ pop(edx);
- }
-
- __ bind(&heapnumber_allocated);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- }
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
- Label* slow) {
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, slow);
-
- // Convert the heap number in eax to an untagged integer in ecx.
- IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);
-
- // Do the bitwise operation and check if the result fits in a smi.
- Label try_float;
- __ not_(ecx);
- __ cmp(ecx, 0xc0000000);
- __ j(sign, &try_float, Label::kNear);
-
- // Tag the result as a smi and we're done.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ lea(eax, Operand(ecx, times_2, kSmiTag));
- __ ret(0);
-
- // Try to store the result in a heap number.
- __ bind(&try_float);
- if (mode_ == UNARY_NO_OVERWRITE) {
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ mov(ebx, eax);
- __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the original HeapNumber on the stack. The integer value can't
- // be stored since it's untagged and not in the smi range (so we can't
- // smi-tag it). We'll recalculate the value after the GC instead.
- __ push(ebx);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- // New HeapNumber is in eax.
- __ pop(edx);
- }
- // IntegerConvert uses ebx and edi as scratch registers.
- // This conversion won't go slow-case.
- IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
- __ not_(ecx);
-
- __ bind(&heapnumber_allocated);
- }
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ecx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(0);
-}
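
The cmp(ecx, 0xc0000000) / j(sign, ...) pair above is a range check in disguise: subtracting 0xc0000000 is congruent mod 2^32 to adding 0x40000000, so the sign flag comes out set exactly when the value lies outside the smi range [-2^30, 2^30 - 1]. The same idiom guards the smi-tagging of shift results further down in this file. A C++ check of the trick (the helper name is ours):

    #include <cassert>
    #include <cstdint>

    static bool FitsInSmi(int32_t x) {
      // Same flags as "cmp x, 0xc0000000": sign clear <=> x + 2^30 >= 0.
      return static_cast<int32_t>(static_cast<uint32_t>(x) + 0x40000000u) >= 0;
    }

    int main() {
      assert(FitsInSmi(0));
      assert(FitsInSmi((1 << 30) - 1) && FitsInSmi(-(1 << 30)));
      assert(!FitsInSmi(1 << 30) && !FitsInSmi(-(1 << 30) - 1));
      return 0;
    }
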
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, undo, slow;
- GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&undo);
- GenerateSmiCodeUndo(masm);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the corresponding JavaScript builtin.
- __ pop(ecx); // pop return address.
- __ push(eax);
- __ push(ecx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- __ push(edx);
- __ push(eax);
- // Left and right arguments are now on top.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-// Prepare for a type transition runtime call when the args are already on
-// the stack, under the return address.
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
- __ pop(ecx); // Save return address.
- // Left and right arguments are already on top of the stack.
- __ push(Immediate(Smi::FromInt(MinorKey())));
-
- __ push(ecx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the
- // dividend in eax and edx free for the division. Use eax, ebx for those.
- Comment load_comment(masm, "-- Load arguments");
- Register left = edx;
- Register right = eax;
- if (op == Token::DIV || op == Token::MOD) {
- left = eax;
- right = ebx;
- __ mov(ebx, eax);
- __ mov(eax, edx);
- }
-
-
- // 2. Prepare the smi check of both operands by oring them together.
- Comment smi_check_comment(masm, "-- Smi check arguments");
- Label not_smis;
- Register combined = ecx;
- ASSERT(!left.is(combined) && !right.is(combined));
- switch (op) {
- case Token::BIT_OR:
- // Perform the operation into eax and smi check the result. Preserve
- // eax in case the result is not a smi.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left); // Bitwise or is commutative.
- combined = right;
- break;
-
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- __ mov(combined, right);
- __ or_(combined, left);
- break;
-
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Move the right operand into ecx for the shift operation, use eax
- // for the smi check register.
- ASSERT(!left.is(ecx) && !right.is(ecx));
- __ mov(ecx, right);
- __ or_(right, left);
- combined = right;
- break;
-
- default:
- break;
- }
-
- // 3. Perform the smi check of the operands.
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ JumpIfNotSmi(combined, &not_smis);
-
- // 4. Operands are both smis, perform the operation leaving the result in
- // eax and check the result if necessary.
- Comment perform_smi(masm, "-- Perform smi operation");
- Label use_fp_on_smis;
- switch (op) {
- case Token::BIT_OR:
- // Nothing to do.
- break;
-
- case Token::BIT_XOR:
- ASSERT(right.is(eax));
- __ xor_(right, left); // Bitwise xor is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(eax));
- __ and_(right, left); // Bitwise and is commutative.
- break;
-
- case Token::SHL:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shl_cl(left);
- // Check that the *signed* result fits in a smi.
- __ cmp(left, 0xc0000000);
- __ j(sign, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SAR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ sar_cl(left);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
-
- case Token::SHR:
- // Remove tags from operands (but keep sign).
- __ SmiUntag(left);
- __ SmiUntag(ecx);
- // Perform the operation.
- __ shr_cl(left);
- // Check that the *unsigned* result fits in a smi.
- // Neither of the two high-order bits can be set:
- // - 0x80000000: the high bit would be lost when smi tagging.
- // - 0x40000000: this number would convert to negative when
- //   smi tagging.
- // These two cases can only happen with shifts by 0 or 1 when
- // handed a valid smi.
- __ test(left, Immediate(0xc0000000));
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(left);
- __ mov(eax, left);
- break;
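-
- // Both 0xc0000000 tests above follow from the 31-bit smi payload: a
- // result is taggable iff it lies in [-2^30, 2^30 - 1], e.g.
- //
- //   bool fits_in_smi(int32_t v) {
- //     return v >= -(1 << 30) && v < (1 << 30);
- //   }
- //
- // For SHR the result is unsigned, so either of the two top bits being
- // set (mask 0xc0000000) means it is >= 2^30. For SHL, cmp against
- // 0xc0000000 leaves the sign flag set exactly for values outside that
- // range, which j(sign) catches in a single test.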
-
- case Token::ADD:
- ASSERT(right.is(eax));
- __ add(right, left); // Addition is commutative.
- __ j(overflow, &use_fp_on_smis);
- break;
-
- case Token::SUB:
- __ sub(left, right);
- __ j(overflow, &use_fp_on_smis);
- __ mov(eax, left);
- break;
-
- case Token::MUL:
- // If the smi tag is 0 we can just leave the tag on one operand.
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
- // We can't revert the multiplication if the result is not a smi
- // so save the right operand.
- __ mov(ebx, right);
- // Remove tag from one of the operands (but keep sign).
- __ SmiUntag(right);
- // Do multiplication.
- __ imul(right, left); // Multiplication is commutative.
- __ j(overflow, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(right, combined, &use_fp_on_smis);
- break;
-
- case Token::DIV:
- // We can't revert the division if the result is not a smi so
- // save the left operand.
- __ mov(edi, left);
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &use_fp_on_smis);
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for the corner case of dividing the most negative smi by
- // -1. We cannot use the overflow flag, since it is not set by idiv
- // instruction.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &use_fp_on_smis);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
- // Check that the remainder is zero.
- __ test(edx, edx);
- __ j(not_zero, &use_fp_on_smis);
- // Tag the result and store it in register eax.
- __ SmiTag(eax);
- break;
-
- case Token::MOD:
- // Check for 0 divisor.
- __ test(right, right);
- __ j(zero, &not_smis);
-
- // Sign extend left into edx:eax.
- ASSERT(left.is(eax));
- __ cdq();
- // Divide edx:eax by right.
- __ idiv(right);
- // Check for negative zero result. Use combined = left | right.
- __ NegativeZeroTest(edx, combined, slow);
- // Move remainder to register eax.
- __ mov(eax, edx);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- __ ret(0);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- __ ret(2 * kPointerSize);
- break;
- default:
- UNREACHABLE();
- }
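-
- // Note on the two return flavors: ADD/SUB/MUL/DIV are entered with
- // the operands in registers only and return with ret(0), while the
- // other ops were entered through GenerateRegisterArgsPush and must
- // drop the two stack copies of the operands, hence
- // ret(2 * kPointerSize).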
-
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
- __ bind(&use_fp_on_smis);
- switch (op) {
- // Undo the effects of some operations, and some register moves.
- case Token::SHL:
- // The arguments are saved on the stack, and only used from there.
- break;
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division. They should be in eax, ebx for jump to not_smi.
- __ mov(eax, edi);
- break;
- default:
- // No other operators jump to use_fp_on_smis.
- break;
- }
- __ jmp(&not_smis);
- } else {
- ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
- switch (op) {
- case Token::SHL:
- case Token::SHR: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Result we want is in left == edx, so we can put the allocated heap
- // number in eax.
- __ AllocateHeapNumber(eax, ecx, ebx, slow);
- // Store the result in the HeapNumber and return.
- // It's OK to overwrite the arguments on the stack because we
- // are about to return.
- if (op == Token::SHR) {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
- __ fild_d(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- } else {
- ASSERT_EQ(Token::SHL, op);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, left);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), left);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- }
- __ ret(2 * kPointerSize);
- break;
- }
-
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Comment perform_float(masm, "-- Perform float operation on smis");
- __ bind(&use_fp_on_smis);
- // Restore arguments to edx, eax.
- switch (op) {
- case Token::ADD:
- // Revert right = right + left.
- __ sub(right, left);
- break;
- case Token::SUB:
- // Revert left = left - right.
- __ add(left, right);
- break;
- case Token::MUL:
- // Right was clobbered but a copy is in ebx.
- __ mov(right, ebx);
- break;
- case Token::DIV:
- // Left was clobbered but a copy is in edi. Right is in ebx for
- // division.
- __ mov(edx, edi);
- __ mov(eax, right);
- break;
- default: UNREACHABLE();
- break;
- }
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
- }
- __ mov(eax, ecx);
- __ ret(0);
- break;
- }
-
- default:
- break;
- }
- }
-
- // 7. Non-smi operands, fall out to the non-smi code with the operands in
- // edx and eax.
- Comment done_comment(masm, "-- Enter non-smi code");
- __ bind(&not_smis);
- switch (op) {
- case Token::BIT_OR:
- case Token::SHL:
- case Token::SAR:
- case Token::SHR:
- // Right operand is saved in ecx and eax was destroyed by the smi
- // check.
- __ mov(eax, ecx);
- break;
-
- case Token::DIV:
- case Token::MOD:
- // Operands are in eax, ebx at this point.
- __ mov(edx, eax);
- __ mov(eax, ebx);
- break;
-
- default:
- break;
- }
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label call_runtime;
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
- } else {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- }
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateTypeTransition(masm);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-// Input:
-// edx: left operand (tagged)
-// eax: right operand (tagged)
-// Output:
-// eax: result (tagged)
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- Label not_floats;
- Label not_int32;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_int32);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_int32);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
- if (op_ == Token::MOD) {
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- // Check result type if it is currently Int32.
- if (result_type_ <= BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ecx, ecx, xmm2);
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- }
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- if (op_ == Token::MOD) {
- // The operands are now on the FPU stack, but we don't need them.
- __ fstp(0);
- __ fstp(0);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- } else {
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label not_int32;
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- __ bind(&not_int32);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR hits a hard case, use the runtime system to
- // get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateRegisterArgsPush(masm);
- break;
- case Token::MOD:
- return; // Handled above.
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- break;
- default:
- UNREACHABLE();
- }
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- Factory* factory = masm->isolate()->factory();
-
- // Convert odd ball arguments to numbers.
- Label check, done;
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(edx, edx);
- } else {
- __ mov(edx, Immediate(factory->nan_value()));
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(eax, eax);
- } else {
- __ mov(eax, Immediate(factory->nan_value()));
- }
- __ bind(&done);
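-
- // The substitutions above follow ES5: ToNumber(undefined) is NaN
- // (section 9.3), and for the bitwise ops the operand is about to be
- // truncated with ToInt32, where ToInt32(NaN) is 0 (section 9.5), so
- // storing the smi 0 directly skips a NaN round trip.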
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime;
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_floats);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_floats);
- }
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- if (left_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm0, ecx, ecx, xmm2);
- }
- if (right_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm1, ecx, ecx, xmm2);
- }
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
-
- __ bind(&not_floats);
- GenerateTypeTransition(masm);
- break;
- }
-
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- GenerateRegisterArgsPush(masm);
- Label not_floats;
- Label non_smi_result;
- // We do not check the input arguments here, as any value is
- // unconditionally truncated to an int32 anyway; int32 type feedback
- // is therefore exactly what the optimizing compiler needs.
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(
- masm, use_sse3, left_type_, right_type_, &not_floats);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
- }
-
- __ bind(&not_floats);
- GenerateTypeTransitionWithSavedArgs(masm);
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If an allocation fails, or SHR or MOD hit a hard case,
- // use the runtime system to get the correct result.
- __ bind(&call_runtime);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- GenerateRegisterArgsPush(masm);
- break;
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- break;
- default:
- UNREACHABLE();
- }
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- GenerateRegisterArgsPush(masm);
- break;
- default:
- UNREACHABLE();
- }
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
- // Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- Label not_floats;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-
- switch (op_) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- } else { // SSE2 not available, use FPU.
- FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
- FloatingPointHelper::LoadFloatOperands(
- masm,
- ecx,
- FloatingPointHelper::ARGS_IN_REGISTERS);
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- __ bind(&after_alloc_failure);
- __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime);
- }
- __ bind(&not_floats);
- break;
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3,
- BinaryOpIC::GENERIC,
- BinaryOpIC::GENERIC,
- &call_runtime);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, ecx); break;
- case Token::BIT_AND: __ and_(eax, ecx); break;
- case Token::BIT_XOR: __ xor_(eax, ecx); break;
- case Token::SAR: __ sar_cl(eax); break;
- case Token::SHL: __ shl_cl(eax); break;
- case Token::SHR: __ shr_cl(eax); break;
- default: UNREACHABLE();
- }
- if (op_ == Token::SHR) {
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &call_runtime);
- } else {
- // Check if result fits in a smi.
- __ cmp(eax, 0xc0000000);
- __ j(negative, &non_smi_result, Label::kNear);
- }
- // Tag smi result and return.
- __ SmiTag(eax);
- __ ret(2 * kPointerSize); // Drop the arguments from the stack.
-
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(2 * kPointerSize);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // If all else fails, use the runtime system to get the correct
- // result.
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- GenerateAddStrings(masm);
- // Fall through.
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- GenerateRegisterArgsPush(masm);
- break;
- case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- break;
- default:
- UNREACHABLE();
- }
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &left_not_string, Label::kNear);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
- Label skip_allocation;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in edx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now edx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(edx, ebx);
- __ bind(&skip_allocation);
- // Use object in edx as a result holder
- __ mov(eax, edx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep eax and edx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
- // Now eax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ mov(eax, ebx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
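-
-// The overwrite modes let a binary op reuse one operand's HeapNumber as
-// the result object when that operand is dead after the operation,
-// saving an allocation on the fast path; NO_OVERWRITE always allocates
-// a fresh HeapNumber.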
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
-}
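-
-// Stack layout produced by GenerateRegisterArgsPush:
-//   esp[0]: return address
-//   esp[4]: eax (right operand)
-//   esp[8]: edx (left operand)
-// This is what the ret(2 * kPointerSize) exits above rely on when they
-// drop the two pushed arguments.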
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // TAGGED case:
- // Input:
- // esp[4]: tagged number input argument (should be number).
- // esp[0]: return address.
- // Output:
- // eax: tagged double result.
- // UNTAGGED case:
- // Input:
- // esp[0]: return address.
- // xmm1: untagged double input argument
- // Output:
- // xmm1: untagged double result.
-
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label skip_cache;
- const bool tagged = (argument_type_ == TAGGED);
- if (tagged) {
- // Test that eax is a number.
- Label input_not_smi;
- Label loaded;
- __ mov(eax, Operand(esp, kPointerSize));
- __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the low and high words of the double into ebx, edx.
- STATIC_ASSERT(kSmiTagSize == 1);
- __ sar(eax, 1);
- __ sub(esp, Immediate(2 * kPointerSize));
- __ mov(Operand(esp, 0), eax);
- __ fild_s(Operand(esp, 0));
- __ fst_d(Operand(esp, 0));
- __ pop(edx);
- __ pop(ebx);
- __ jmp(&loaded, Label::kNear);
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ebx, Immediate(factory->heap_number_map()));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // low and high words into ebx, edx.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
-
- __ bind(&loaded);
- } else { // UNTAGGED.
- CpuFeatures::Scope scope(SSE2);
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope sse4_scope(SSE4_1);
- __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
- } else {
- __ pshufd(xmm0, xmm1, 0x1);
- __ movd(edx, xmm0);
- }
- __ movd(ebx, xmm1);
- }
-
- // ST[0] or xmm1 == double value
- // ebx = low 32 bits of double value
- // edx = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ mov(ecx, ebx);
- __ xor_(ecx, edx);
- __ mov(eax, ecx);
- __ sar(eax, 16);
- __ xor_(ecx, eax);
- __ mov(eax, ecx);
- __ sar(eax, 8);
- __ xor_(ecx, eax);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ and_(ecx,
- Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
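-
- // A roughly equivalent C++ sketch of the hash above (lo/hi are the
- // two halves of the double; the shifts are arithmetic, matching sar):
- //
- //   uint32_t h = lo ^ hi;
- //   h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 16);
- //   h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 8);
- //   h &= TranscendentalCache::SubCache::kCacheSize - 1;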
-
- // ST[0] or xmm1 == double value.
- // ebx = low 32 bits of double value.
- // edx = high 32 bits of double value.
- // ecx = TranscendentalCache::hash(double value).
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(masm->isolate());
- __ mov(eax, Immediate(cache_array));
- int cache_array_index =
- type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
- __ mov(eax, Operand(eax, cache_array_index));
- // eax now points to the cache for type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ test(eax, eax);
- __ j(zero, &runtime_call_clear_stack);
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
- // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
- __ lea(ecx, Operand(ecx, ecx, times_2, 0));
- __ lea(ecx, Operand(eax, ecx, times_4, 0));
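- // The two lea instructions compute &cache[hash] without a multiply:
- // ecx = ecx + ecx*2 (hash * 3), then ecx = eax + ecx*4 (base +
- // hash * 12). The 12 bytes per element are the two uint32 input words
- // plus the output pointer checked in the DEBUG block above.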
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmp(ebx, Operand(ecx, 0));
- __ j(not_equal, &cache_miss, Label::kNear);
- __ cmp(edx, Operand(ecx, kIntSize));
- __ j(not_equal, &cache_miss, Label::kNear);
- // Cache hit!
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->transcendental_cache_hit(), 1);
- __ mov(eax, Operand(ecx, 2 * kIntSize));
- if (tagged) {
- __ fstp(0);
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- CpuFeatures::Scope scope(SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- __ bind(&cache_miss);
- __ IncrementCounter(counters->transcendental_cache_miss(), 1);
- // Update cache with new value.
- // We are short on registers, so use no_reg as scratch.
- // This gives slightly larger code.
- if (tagged) {
- __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
- } else { // UNTAGGED.
- CpuFeatures::Scope scope(SSE2);
- __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
- __ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- }
- GenerateOperation(masm, type_);
- __ mov(Operand(ecx, 0), ebx);
- __ mov(Operand(ecx, kIntSize), edx);
- __ mov(Operand(ecx, 2 * kIntSize), eax);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- if (tagged) {
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- CpuFeatures::Scope scope(SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
-
- // Skip cache and return answer directly, only in untagged case.
- __ bind(&skip_cache);
- __ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
- __ fld_d(Operand(esp, 0));
- GenerateOperation(masm, type_);
- __ fstp_d(Operand(esp, 0));
- __ movdbl(xmm1, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- // We return the value in xmm1 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Allocate an unused object bigger than a HeapNumber.
- __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-
- // Call runtime, doing whatever allocation and cleanup is necessary.
- if (tagged) {
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- ExternalReference runtime =
- ExternalReference(RuntimeFunction(), masm->isolate());
- __ TailCallExternalReference(runtime, 1, 1);
- } else { // UNTAGGED.
- CpuFeatures::Scope scope(SSE2);
- __ bind(&runtime_call_clear_stack);
- __ bind(&runtime_call);
- __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(eax);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ Ret();
- }
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(
- MacroAssembler* masm, TranscendentalCache::Type type) {
- // Only free register is edi.
- // Input value is on FP stack, and also in ebx/edx.
- // Input value is possibly in xmm1.
- // Address of result (a newly allocated HeapNumber) may be in eax.
- if (type == TranscendentalCache::SIN ||
- type == TranscendentalCache::COS ||
- type == TranscendentalCache::TAN) {
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range, done;
- // If the argument is outside the range -2^63..2^63, fsin/fcos do not
- // work, so we must first reduce it to the appropriate range.
- __ mov(edi, edx);
- __ and_(edi, Immediate(0x7ff00000)); // Exponent only.
- int supported_exponent_limit =
- (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
- __ cmp(edi, Immediate(supported_exponent_limit));
- __ j(below, &in_range, Label::kNear);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmp(edi, Immediate(0x7ff00000));
- Label non_nan_result;
- __ j(not_equal, &non_nan_result, Label::kNear);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ push(Immediate(0x7ff80000));
- __ push(Immediate(0));
- __ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(2 * kPointerSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&non_nan_result);
-
- // Use fprem1 to restrict the argument to the range +/-2*PI.
- __ mov(edi, eax); // Save eax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ test(eax, Immediate(5));
- __ j(zero, &no_exceptions, Label::kNear);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ test(eax, Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
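- // fprem1 computes the IEEE-754 remainder of st(0) by st(1), but only
- // partially: each execution reduces the exponent difference by at
- // most 63 and sets the C2 status bit while work remains, hence the
- // loop. The net effect is st(0) = remainder(input, 2*pi).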
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- __ fstp(0);
- __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
-
- // FPU Stack: input % 2*pi
- __ bind(&in_range);
- switch (type) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- case TranscendentalCache::TAN:
- // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
- // FP register stack.
- __ fptan();
- __ fstp(0); // Pop FP register stack.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
- } else {
- ASSERT(type == TranscendentalCache::LOG);
- __ fldln2();
- __ fxch();
- __ fyl2x();
- }
-}
-
-
-// Input: edx, eax are the left and right objects of a bit op.
-// Output: eax, ecx are left and right integers for a bit op.
-// Warning: can clobber inputs even when it jumps to |conversion_failure|!
-void FloatingPointHelper::LoadUnknownsAsIntegers(
- MacroAssembler* masm,
- bool use_sse3,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- Label* conversion_failure) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- // Test if arg1 is a Smi.
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, conversion_failure);
- } else {
- __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
- }
-
- __ SmiUntag(edx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- Factory* factory = masm->isolate()->factory();
- __ cmp(edx, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(edx, Immediate(0));
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg1);
-
- // Get the untagged integer version of the edx heap number in ecx.
- if (left_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- ConvertHeapNumberToInt32(masm, edx, conversion_failure);
- } else {
- IntegerConvert(masm, edx, use_sse3, conversion_failure);
- }
- __ mov(edx, ecx);
-
- // Here edx has the untagged integer, eax has a Smi or a heap number.
- __ bind(&load_arg2);
-
- // Test if arg2 is a Smi.
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, conversion_failure);
- } else {
- __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
- }
-
- __ SmiUntag(eax);
- __ mov(ecx, eax);
- __ jmp(&done);
-
- // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ cmp(eax, factory->undefined_value());
- __ j(not_equal, conversion_failure);
- __ mov(ecx, Immediate(0));
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(ebx, factory->heap_number_map());
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the eax heap number in ecx.
-
- if (right_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- ConvertHeapNumberToInt32(masm, eax, conversion_failure);
- } else {
- IntegerConvert(masm, eax, use_sse3, conversion_failure);
- }
-
- __ bind(&done);
- __ mov(eax, edx);
-}
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register number) {
- Label load_smi, done;
-
- __ JumpIfSmi(number, &load_smi, Label::kNear);
- __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi);
- __ SmiUntag(number);
- __ push(number);
- __ fild_s(Operand(esp, 0));
- __ pop(number);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
- Label load_smi_edx, load_eax, load_smi_eax, done;
- // Load operand in edx into xmm0.
- __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- __ bind(&load_eax);
- // Load operand in eax into xmm1.
- __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
-
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
- // Load operand in edx into xmm0, or branch to not_numbers.
- __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
- Factory* factory = masm->isolate()->factory();
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
- __ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ bind(&load_eax);
- // Load operand in eax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
- __ j(equal, &load_float_eax, Label::kNear);
- __ jmp(not_numbers); // Argument in eax is not a number.
- __ bind(&load_smi_edx);
- __ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
- __ SmiTag(edx); // Retag smi for heap number overwriting test.
- __ jmp(&load_eax);
- __ bind(&load_smi_eax);
- __ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
- __ SmiTag(eax); // Retag smi for heap number overwriting test.
- __ jmp(&done, Label::kNear);
- __ bind(&load_float_eax);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, scratch);
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, scratch);
-}
-
-
-void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
- Label* non_int32,
- Register scratch) {
- CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, scratch, xmm2);
- CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, scratch, xmm2);
-}
-
-
-void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register int32_result,
- Register scratch,
- XMMRegister xmm_scratch) {
- __ cvttsd2si(int32_result, Operand(operand));
- __ cvtsi2sd(xmm_scratch, int32_result);
- __ pcmpeqd(xmm_scratch, operand);
- __ movmskps(scratch, xmm_scratch);
- // The two least significant bits should both be set.
- __ not_(scratch);
- __ test(scratch, Immediate(3));
- __ j(not_zero, non_int32);
-}
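-
-// The check above round-trips the value: truncate to int32 with
-// cvttsd2si, convert back to double, and compare the two doubles
-// bitwise with pcmpeqd. pcmpeqd compares 32-bit lanes, so both of the
-// low two movmskps sign bits must be set on a match, which is why the
-// code inverts scratch and tests against 3.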
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch,
- ArgLocation arg_location) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, edx);
- } else {
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- if (arg_location == ARGS_IN_REGISTERS) {
- __ mov(scratch, eax);
- } else {
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- }
- __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done, Label::kNear);
-
- __ bind(&load_smi_1);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
- Register scratch) {
- const Register left = edx;
- const Register right = eax;
- __ mov(scratch, left);
- ASSERT(!scratch.is(right)); // We're about to clobber scratch.
- __ SmiUntag(scratch);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
-
- __ mov(scratch, right);
- __ SmiUntag(scratch);
- __ mov(Operand(esp, 0), scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-}
-
-
-void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch) {
- Label test_other, done;
- // Test that both operands are numbers (heap numbers or smis);
- // jump to non_float otherwise.
- __ JumpIfSmi(edx, &test_other, Label::kNear);
- __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(scratch, factory->heap_number_map());
- __ j(not_equal, non_float); // argument in edx is not a number -> NaN
-
- __ bind(&test_other);
- __ JumpIfSmi(eax, &done, Label::kNear);
- __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(scratch, factory->heap_number_map());
- __ j(not_equal, non_float); // argument in eax is not a number -> NaN
-
- // Fall-through: Both operands are numbers.
- __ bind(&done);
-}
-
-
-void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatures::Scope use_sse2(SSE2);
- Factory* factory = masm->isolate()->factory();
- const Register exponent = eax;
- const Register base = edx;
- const Register scratch = ecx;
- const XMMRegister double_result = xmm3;
- const XMMRegister double_base = xmm2;
- const XMMRegister double_exponent = xmm1;
- const XMMRegister double_scratch = xmm4;
-
- Label call_runtime, done, exponent_not_smi, int_exponent;
-
- // Save 1 in double_result - we need this several times later on.
- __ mov(scratch, Immediate(1));
- __ cvtsi2sd(double_result, scratch);
-
- if (exponent_type_ == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack.
- __ mov(base, Operand(esp, 2 * kPointerSize));
- __ mov(exponent, Operand(esp, 1 * kPointerSize));
-
- __ JumpIfSmi(base, &base_is_smi, Label::kNear);
- __ cmp(FieldOperand(base, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
-
- __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent, Label::kNear);
-
- __ bind(&base_is_smi);
- __ SmiUntag(base);
- __ cvtsi2sd(double_base, base);
-
- __ bind(&unpack_exponent);
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
- __ movdbl(double_exponent,
- FieldOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ movdbl(double_exponent,
- FieldOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type_ != INTEGER) {
- Label fast_power;
- // Detect integer exponents stored as double.
- __ cvttsd2si(exponent, Operand(double_exponent));
- // Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cmp(exponent, Immediate(0x80000000u));
- __ j(equal, &call_runtime);
- __ cvtsi2sd(double_scratch, exponent);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_exponent, double_scratch);
- __ j(equal, &int_exponent);
-
- if (exponent_type_ == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label continue_sqrt, continue_rsqrt, not_plus_half;
- // Test for 0.5.
- // Load double_scratch with 0.5.
- __ mov(scratch, Immediate(0x3F000000u));
- __ movd(double_scratch, scratch);
- __ cvtss2sd(double_scratch, double_scratch);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &not_plus_half, Label::kNear);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- // According to IEEE-754, single-precision -Infinity has the highest
- // 9 bits set and the lowest 23 bits cleared.
- __ mov(scratch, 0xFF800000u);
- __ movd(double_scratch, scratch);
- __ cvtss2sd(double_scratch, double_scratch);
- __ ucomisd(double_base, double_scratch);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_sqrt, Label::kNear);
- __ j(carry, &continue_sqrt, Label::kNear);
-
- // Set result to Infinity in the special case.
- __ xorps(double_result, double_result);
- __ subsd(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(&continue_sqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_scratch, double_scratch);
- __ addsd(double_scratch, double_base); // Convert -0 to +0.
- __ sqrtsd(double_result, double_scratch);
- __ jmp(&done);
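-
- // The xorps/addsd pair above normalizes -0 to +0 before sqrtsd:
- // under IEEE-754 round-to-nearest, (+0) + (-0) is +0, while
- // sqrtsd(-0) would return -0. The rsqrt case below uses the same
- // trick.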
-
- // Test for -0.5.
- __ bind(&not_plus_half);
- // Load double_scratch with -0.5 by subtracting 1 from the 0.5 it holds.
- __ subsd(double_scratch, double_result);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &fast_power, Label::kNear);
-
- // Calculates reciprocal of square root of base. Check for the special
- // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- // According to IEEE-754, single-precision -Infinity has the highest
- // 9 bits set and the lowest 23 bits cleared.
- __ mov(scratch, 0xFF800000u);
- __ movd(double_scratch, scratch);
- __ cvtss2sd(double_scratch, double_scratch);
- __ ucomisd(double_base, double_scratch);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_rsqrt, Label::kNear);
- __ j(carry, &continue_rsqrt, Label::kNear);
-
- // Set result to 0 in the special case.
- __ xorps(double_result, double_result);
- __ jmp(&done);
-
- __ bind(&continue_rsqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_exponent, double_exponent);
- __ addsd(double_exponent, double_base); // Convert -0 to +0.
- __ sqrtsd(double_exponent, double_exponent);
- __ divsd(double_result, double_exponent);
- __ jmp(&done);
- }
-
- // Using FPU instructions to calculate power.
- Label fast_power_failed;
- __ bind(&fast_power);
- __ fnclex(); // Clear flags to catch exceptions later.
- // Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), double_exponent);
- __ fld_d(Operand(esp, 0)); // E
- __ movdbl(Operand(esp, 0), double_base);
- __ fld_d(Operand(esp, 0)); // B, E
-
- // Exponent is in st(1) and base is in st(0)
- // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
- // FYL2X calculates st(1) * log2(st(0))
- __ fyl2x(); // X
- __ fld(0); // X, X
- __ frndint(); // rnd(X), X
- __ fsub(1); // rnd(X), X-rnd(X)
- __ fxch(1); // X - rnd(X), rnd(X)
- // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
- __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
- __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 2^(X-rnd(X)), rnd(X)
- // FSCALE calculates st(0) * 2^st(1)
- __ fscale(); // 2^X, rnd(X)
- __ fstp(1); // 2^X
- // Bail out to runtime in case of exceptions in the status word.
- __ fnstsw_ax();
- __ test_b(eax, 0x5F); // We check for all but precision exception.
- __ j(not_zero, &fast_power_failed, Label::kNear);
- __ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- __ jmp(&done);
-
- __ bind(&fast_power_failed);
- __ fninit();
- __ add(esp, Immediate(kDoubleSize));
- __ jmp(&call_runtime);
- }
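-
- // The fast-power sequence evaluates B^E via B^E = 2^(E * log2(B)).
- // X = E * log2(B) is split as rnd(X) + (X - rnd(X)) because f2xm1
- // only accepts arguments in (-1, 1); 2^X is then reassembled as
- // (f2xm1(X - rnd(X)) + 1) scaled by 2^rnd(X) via fscale.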
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
- const XMMRegister double_scratch2 = double_exponent;
- __ mov(scratch, exponent); // Back up exponent.
- __ movsd(double_scratch, double_base); // Back up base.
- __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
-
- // Get absolute value of exponent.
- Label no_neg, while_true, while_false;
- __ test(scratch, scratch);
- __ j(positive, &no_neg, Label::kNear);
- __ neg(scratch);
- __ bind(&no_neg);
-
- __ j(zero, &while_false, Label::kNear);
- __ shr(scratch, 1);
- // The 'above' condition used below means CF==0 && ZF==0, i.e. the
- // bit just shifted out was 0 and the remaining value is not 0.
- __ j(above, &while_true, Label::kNear);
- __ movsd(double_result, double_scratch);
- __ j(zero, &while_false, Label::kNear);
-
- __ bind(&while_true);
- __ shr(scratch, 1);
- __ mulsd(double_scratch, double_scratch);
- __ j(above, &while_true, Label::kNear);
- __ mulsd(double_result, double_scratch);
- __ j(not_zero, &while_true);
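-
- // The loop above is binary exponentiation (square-and-multiply). A
- // C++ sketch of the same computation, assuming abs_exponent is the
- // absolute value taken above:
- //
- //   double result = 1.0, b = base;
- //   for (uint32_t e = abs_exponent; e != 0; e >>= 1) {
- //     if (e & 1) result *= b;  // fold in the current power of base
- //     b *= b;                  // square for the next bit
- //   }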
-
- __ bind(&while_false);
- // scratch has the original value of the exponent - if the exponent is
- // negative, return 1/result.
- __ test(exponent, exponent);
- __ j(positive, &done);
- __ divsd(double_scratch2, double_result);
- __ movsd(double_result, double_scratch2);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ xorps(double_scratch2, double_scratch2);
- __ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
- // double_exponent, aliased as double_scratch2, has already been overwritten
- // and, when the exponent is a smi, may never have contained the exponent
- // value in the first place. Reset it to the exponent value before bailing
- // out.
- __ j(not_equal, &done);
- __ cvtsi2sd(double_exponent, exponent);
-
- // Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
- if (exponent_type_ == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-
- // The stub is called from non-optimized code, which expects the result
- // as a heap number in the exponent register (eax).
- __ bind(&done);
- __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
- __ IncrementCounter(counters->math_pow(), 1);
- __ ret(2 * kPointerSize);
- } else {
- __ bind(&call_runtime);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(4, scratch);
- __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
- __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()), 4);
- }
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
-
- __ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1);
- __ ret(0);
- }
-}
-
-
-void ArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
- __ j(not_equal, &miss);
- }
-
- StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->prototype_string()));
- __ j(not_equal, &miss);
- }
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StringLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
- __ j(not_equal, &miss);
- }
-
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
- support_wrapper_);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = edx;
- Register value = eax;
- Register scratch = ebx;
-
- if (kind() == Code::KEYED_LOAD_IC) {
- __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
- __ j(not_equal, &miss);
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::GenerateStoreMiss(masm, kind());
-}
-
-
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, eax, reg_, inobject_, index_);
- __ ret(0);
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in edx and the parameter count is in eax.
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(edx, &slow, Label::kNear);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor, Label::kNear);
-
- // Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
- // check for free.
- __ cmp(edx, eax);
- __ j(above_equal, &slow, Label::kNear);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebp, eax, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(edx, ecx);
- __ j(above_equal, &slow, Label::kNear);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebx, ecx, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(ebx); // Return address.
- __ push(edx);
- __ push(ebx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
-
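- // For reference, a hedged reading of the address arithmetic above: with a
- // tagged (smi) count and key, times_2 scaling turns each smi into a byte
- // offset, so the element load resolves to
- //   fp + kDisplacement + (count - key) * kPointerSize
- // i.e. arguments are read from the high end of the caller's frame.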
-
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[4] : number of parameters
- // esp[8] : receiver displacement
- // esp[12] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &runtime, Label::kNear);
-
- // Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[4] : number of parameters (tagged)
- // esp[8] : receiver displacement
- // esp[12] : function
-
- // ebx = parameter count (tagged)
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
-
- // Check if the calling frame is an arguments adaptor frame.
- // TODO(rossberg): Factor out some of the bits that are shared with the other
- // Generate* functions.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame, Label::kNear);
-
- // No adaptor, parameter count = argument count.
- __ mov(ecx, ebx);
- __ jmp(&try_allocate, Label::kNear);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
-
- // ebx = parameter count (tagged)
- // ecx = argument count (tagged)
- // esp[4] = parameter count (tagged)
- // esp[8] = address of receiver argument
- // Compute the mapped parameter count = min(ebx, ecx) in ebx.
- __ cmp(ebx, ecx);
- __ j(less_equal, &try_allocate, Label::kNear);
- __ mov(ebx, ecx);
-
- __ bind(&try_allocate);
-
- // Save mapped parameter count.
- __ push(ebx);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- Label no_parameter_map;
- __ test(ebx, ebx);
- __ j(zero, &no_parameter_map, Label::kNear);
- __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
- __ bind(&no_parameter_map);
-
- // 2. Backing store.
- __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
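- // ebx is now the total size in bytes (hedged reading of the code above):
- //   (mapped_count == 0 ? 0
- //       : kParameterMapHeaderSize + mapped_count * kPointerSize)
- //   + FixedArray::kHeaderSize + argc * kPointerSize
- //   + Heap::kArgumentsObjectSize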
-
- // Do the allocation of all three objects in one go.
- __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
-
- // eax = address of new object(s) (tagged)
- // ecx = argument count (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[8] = parameter count (tagged)
- // esp[12] = address of receiver argument
- // (The gap at esp[4] is the pushed return address; the esp layouts below
- // skip over the return-address slot in the same way.)
- // Get the arguments boilerplate from the current native context into edi.
- Label has_mapped_parameters, copy;
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
- __ mov(ebx, Operand(esp, 0 * kPointerSize));
- __ test(ebx, ebx);
- __ j(not_zero, &has_mapped_parameters, Label::kNear);
- __ mov(edi, Operand(edi,
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
- __ jmp(&copy, Label::kNear);
-
- __ bind(&has_mapped_parameters);
- __ mov(edi, Operand(edi,
- Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
- __ bind(&copy);
-
- // eax = address of new object (tagged)
- // ebx = mapped parameter count (tagged)
- // ecx = argument count (tagged)
- // edi = address of boilerplate object (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[8] = parameter count (tagged)
- // esp[12] = address of receiver argument
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ mov(edx, FieldOperand(edi, i));
- __ mov(FieldOperand(eax, i), edx);
- }
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- edx);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- ecx);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, edi will point there, otherwise to the
- // backing store.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
-
- // eax = address of new object (tagged)
- // ebx = mapped parameter count (tagged)
- // ecx = argument count (tagged)
- // edi = address of parameter map or backing store (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[8] = parameter count (tagged)
- // esp[12] = address of receiver argument
- // Free a register.
- __ push(eax);
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ test(ebx, ebx);
- __ j(zero, &skip_parameter_map);
-
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(FACTORY->non_strict_arguments_elements_map()));
- __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
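- // eax = Smi(mapped_count + 2): the parameter map's length, including the
- // two extra slots (context and backing store) noted above.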
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
- __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ push(ecx);
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ add(ebx, Operand(esp, 4 * kPointerSize));
- __ sub(ebx, eax);
- __ mov(ecx, FACTORY->the_hole_value());
- __ mov(edx, edi);
- __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
- // eax = loop variable (tagged)
- // ebx = mapping index (tagged)
- // ecx = the hole value
- // edx = address of parameter map (tagged)
- // edi = address of backing store (tagged)
- // esp[0] = argument count (tagged)
- // esp[4] = address of new object (tagged)
- // esp[8] = mapped parameter count (tagged)
- // esp[16] = parameter count (tagged)
- // esp[20] = address of receiver argument
- __ jmp(&parameters_test, Label::kNear);
-
- __ bind(&parameters_loop);
- __ sub(eax, Immediate(Smi::FromInt(1)));
- __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
- __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
- __ add(ebx, Immediate(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ test(eax, eax);
- __ j(not_zero, &parameters_loop, Label::kNear);
- __ pop(ecx);
-
- __ bind(&skip_parameter_map);
-
- // ecx = argument count (tagged)
- // edi = address of backing store (tagged)
- // esp[0] = address of new object (tagged)
- // esp[4] = mapped parameter count (tagged)
- // esp[12] = parameter count (tagged)
- // esp[16] = address of receiver argument
- // Copy arguments header and remaining slots (if there are any).
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
- Label arguments_loop, arguments_test;
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
- __ sub(edx, ebx);
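- // edx = receiver_address - mapped_count * kPointerSize (each sub removes
- // one tagged count, i.e. half a slot). Argument i lives at
- // receiver_address - (i + 1) * kPointerSize, so the loop below starts
- // copying at argument #mapped_count (hedged reading).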
- __ jmp(&arguments_test, Label::kNear);
-
- __ bind(&arguments_loop);
- __ sub(edx, Immediate(kPointerSize));
- __ mov(eax, Operand(edx, 0));
- __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
- __ add(ebx, Immediate(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ cmp(ebx, ecx);
- __ j(less, &arguments_loop, Label::kNear);
-
- // Restore.
- __ pop(eax); // Address of arguments object.
- __ pop(ebx); // Parameter count.
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ pop(eax); // Remove saved parameter count.
- __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[4] : number of parameters
- // esp[8] : receiver displacement
- // esp[12] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame, Label::kNear);
-
- // Get the length from the frame.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ jmp(&try_allocate, Label::kNear);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ test(ecx, ecx);
- __ j(zero, &add_arguments_object, Label::kNear);
- __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
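- // ecx is now the allocation size in bytes (hedged reading):
- //   Heap::kArgumentsObjectSizeStrict
- //   + (argc == 0 ? 0 : FixedArray::kHeaderSize + argc * kPointerSize)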
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current native context.
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
- const int offset =
- Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
- __ mov(edi, Operand(edi, offset));
-
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ mov(ebx, FieldOperand(edi, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- ecx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ test(ecx, ecx);
- __ j(zero, &done, Label::kNear);
-
- // Get the parameters pointer from the stack.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
-
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
- // Untag the length for the loop below.
- __ SmiUntag(ecx);
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(edi, Immediate(kPointerSize));
- __ sub(edx, Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
-}
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time, or if entering generated regexp code has been disabled by a runtime
- // switch or at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: last_match_info (expected JSArray)
- // esp[8]: previous index
- // esp[12]: subject string
- // esp[16]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime;
- Factory* factory = masm->isolate()->factory();
-
- // Ensure that a RegExp stack is allocated.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(
- masm->isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
- __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(ebx, ebx);
- __ j(zero, &runtime);
-
- // Check that the first argument is a JSRegExp object.
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
- __ j(not_equal, &runtime);
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- __ test(ecx, Immediate(kSmiTagMask));
- __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // ecx: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
- __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
- __ j(not_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // Check that the number of captures fits in the static offsets vector buffer.
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Check (number_of_captures + 1) * 2 <= offsets vector size
- // Or number_of_captures * 2 <= offsets vector size - 2
- // Multiplying by 2 comes for free since edx is smi-tagged.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
- __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
- __ j(above, &runtime);
-
- // Reset offset for possibly sliced string.
- __ Set(edi, Immediate(0));
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ JumpIfSmi(eax, &runtime);
- __ mov(edx, eax); // Make a copy of the original subject string.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
- // eax: subject string
- // edx: subject string
- // ebx: subject string instance type
- // ecx: RegExp data (FixedArray)
- // Handle subject string according to its encoding and representation:
- // (1) Sequential two byte? If yes, go to (9).
- // (2) Sequential one byte? If yes, go to (6).
- // (3) Anything but sequential or cons? If yes, go to (7).
- // (4) Cons string. If the string is flat, replace subject with first string.
- // Otherwise bail out.
- // (5a) Is subject sequential two byte? If yes, go to (9).
- // (5b) Is subject external? If yes, go to (8).
- // (6) One byte sequential. Load regexp code for one byte.
- // (E) Carry on.
- /// [...]
-
- // Deferred code at the end of the stub:
- // (7) Not a long external string? If yes, go to (10).
- // (8) External string. Make it, offset-wise, look like a sequential string.
- // (8a) Is the external string one byte? If yes, go to (6).
- // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
- // (10) Short external string or not a string? If yes, bail out to runtime.
- // (11) Sliced string. Replace subject with parent. Go to (5a).
-
- Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
- external_string /* 8 */, check_underlying /* 5a */,
- not_seq_nor_cons /* 7 */, check_code /* E */,
- not_long_external /* 10 */;
-
- // (1) Sequential two byte? If yes, go to (9).
- __ and_(ebx, kIsNotStringMask |
- kStringRepresentationMask |
- kStringEncodingMask |
- kShortExternalStringMask);
- STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string); // Go to (9).
-
- // (2) Sequential one byte? If yes, go to (6).
- // Any other sequential string must be one byte.
- __ and_(ebx, Immediate(kIsNotStringMask |
- kStringRepresentationMask |
- kShortExternalStringMask));
- __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6).
-
- // (3) Anything but sequential or cons? If yes, go to (7).
- // We check whether the subject string is a cons, since sequential strings
- // have already been covered.
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
- STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ cmp(ebx, Immediate(kExternalStringTag));
- __ j(greater_equal, &not_seq_nor_cons); // Go to (7).
-
- // (4) Cons string. Check that it's flat.
- // Replace subject with first string and reload instance type.
- __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
- __ j(not_equal, &runtime);
- __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
- __ bind(&check_underlying);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
- // (5a) Is subject sequential two byte? If yes, go to (9).
- __ test_b(ebx, kStringRepresentationMask | kStringEncodingMask);
- STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string); // Go to (9).
- // (5b) Is subject external? If yes, go to (8).
- __ test_b(ebx, kStringRepresentationMask);
- // The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
- __ j(not_zero, &external_string); // Go to (8).
-
- // eax: sequential subject string (or look-alike, external string)
- // edx: original subject string
- // ecx: RegExp data (FixedArray)
- // (6) One byte sequential. Load regexp code for one byte.
- __ bind(&seq_one_byte_string);
- // Load previous index and check range before edx is overwritten. We have
- // to use edx instead of eax here because eax might have only been made to
- // look like a sequential string when it is actually an external string.
- __ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ JumpIfNotSmi(ebx, &runtime);
- __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
- __ j(above_equal, &runtime);
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(ecx, Immediate(1)); // Type is one byte.
-
- // (E) Carry on. String handling is done.
- __ bind(&check_code);
- // edx: irregexp code
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it contains
- // a smi (code flushing support).
- __ JumpIfSmi(edx, &runtime);
-
- // eax: subject string
- // ebx: previous index (smi)
- // edx: code
- // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
- // All checks done. Now push arguments for native regexp code.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->regexp_entry_native(), 1);
-
- // Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 9;
- __ EnterApiExitFrame(kRegExpExecuteArguments);
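- // Hedged summary of the nine C arguments filled in below (not in stack
- // order):
- //   1: subject string               2: previous index (untagged)
- //   3: start of string data         4: end of string data
- //   5: static offsets vector        6: number of capture registers (0)
- //   7: high end of backtrack stack  8: direct-call flag (1)
- //   9: isolate address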
-
- // Argument 9: Pass current isolate address.
- __ mov(Operand(esp, 8 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
- __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ mov(Operand(esp, 6 * kPointerSize), esi);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
-
- // Argument 5: static offsets vector buffer.
- __ mov(Operand(esp, 4 * kPointerSize),
- Immediate(ExternalReference::address_of_static_offsets_vector(
- masm->isolate())));
-
- // Argument 2: Previous index.
- __ SmiUntag(ebx);
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
-
- // Argument 1: Original subject string.
- // The original subject is in the previous stack frame. Therefore we have to
- // use ebp, which points exactly one pointer size below the previous esp.
- // (Creating a new stack frame pushes the previous ebp onto the stack and
- // thereby moves esp down by one kPointerSize.)
- __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), esi);
-
- // esi: original subject string
- // eax: underlying subject string
- // ebx: previous index
- // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
- // edx: code
- // Argument 4: End of string data
- // Argument 3: Start of string data
- // Prepare start and end index of the input.
- // Load the length from the original sliced string if that is the case.
- __ mov(esi, FieldOperand(esi, String::kLengthOffset));
- __ add(esi, edi); // Calculate input end wrt offset.
- __ SmiUntag(edi);
- __ add(ebx, edi); // Calculate input start wrt offset.
-
- // ebx: start index of the input string
- // esi: end index of the input string
- Label setup_two_byte, setup_rest;
- __ test(ecx, ecx);
- __ j(zero, &setup_two_byte, Label::kNear);
- __ SmiUntag(esi);
- __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize));
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize));
- __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
- __ jmp(&setup_rest, Label::kNear);
-
- __ bind(&setup_two_byte);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1); // esi is smi (powered by 2).
- __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
- __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
-
- __ bind(&setup_rest);
-
- // Locate the code entry and call it.
- __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(edx);
-
- // Drop arguments and come back to JS mode.
- __ LeaveApiExitFrame();
-
- // Check the result.
- Label success;
- __ cmp(eax, 1);
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
- __ j(equal, &success);
- Label failure;
- __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
- __ j(equal, &failure);
- __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
- // If not exception, it can only be retry. Handle that in the runtime system.
- __ j(not_equal, &runtime);
- // The result must now be exception. If there is no pending exception yet, a
- // stack overflow (on the backtrack stack) was detected in RegExp code, but
- // the exception has not been created yet. Handle that in the runtime system.
- // TODO(592): Rerun the RegExp to get the stack overflow exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- masm->isolate());
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
- __ mov(eax, Operand::StaticVariable(pending_exception));
- __ cmp(edx, eax);
- __ j(equal, &runtime);
- // For exception, throw the exception again.
-
- // Clear the pending exception variable.
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
- // Special handling of termination exceptions, which are uncatchable
- // by JavaScript code.
- __ cmp(eax, factory->termination_exception());
- Label throw_termination_exception;
- __ j(equal, &throw_termination_exception, Label::kNear);
-
- // Handle normal exception by following handler chain.
- __ Throw(eax);
-
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(eax);
-
- __ bind(&failure);
- // For failure to match, return null.
- __ mov(eax, factory->null_value());
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ mov(eax, Operand(esp, kJSRegExpOffset));
- __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
- __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(edx, Immediate(2)); // edx was a smi.
-
- // edx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- // Check that the fourth object is a JSArray object.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
- __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(eax, factory->fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ SmiUntag(eax);
- __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, eax);
- __ j(greater, &runtime);
-
- // ebx: last_match_info backing store (FixedArray)
- // edx: number of capture registers
- // Store the capture count.
- __ SmiTag(edx); // Number of capture registers to smi.
- __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
- __ SmiUntag(edx); // Number of capture registers back from smi.
- // Store last subject and last input.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ mov(ecx, eax);
- __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
- __ RecordWriteField(ebx,
- RegExpImpl::kLastSubjectOffset,
- eax,
- edi,
- kDontSaveFPRegs);
- __ mov(eax, ecx);
- __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
- __ RecordWriteField(ebx,
- RegExpImpl::kLastInputOffset,
- eax,
- edi,
- kDontSaveFPRegs);
-
- // Get the static offsets vector filled by the native regexp code.
- ExternalReference address_of_static_offsets_vector =
- ExternalReference::address_of_static_offsets_vector(masm->isolate());
- __ mov(ecx, Immediate(address_of_static_offsets_vector));
-
- // ebx: last_match_info backing store (FixedArray)
- // ecx: offsets vector
- // edx: number of capture registers
- Label next_capture, done;
- // The capture register counter starts at the number of capture registers
- // and counts down until it goes negative.
- __ bind(&next_capture);
- __ sub(edx, Immediate(1));
- __ j(negative, &done, Label::kNear);
- // Read the value from the static offsets vector buffer.
- __ mov(edi, Operand(ecx, edx, times_int_size, 0));
- __ SmiTag(edi);
- // Store the smi value in the last match info.
- __ mov(FieldOperand(ebx,
- edx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- edi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ ret(4 * kPointerSize);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-
- // Deferred code for string handling.
- // (7) Not a long external string? If yes, go to (10).
- __ bind(&not_seq_nor_cons);
- // Compare flags are still set from (3).
- __ j(greater, &not_long_external, Label::kNear); // Go to (10).
-
- // (8) External string. Short external strings have been ruled out.
- __ bind(&external_string);
- // Reload instance type.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ test_b(ebx, kIsIndirectStringMask);
- __ Assert(zero, "external string expected, but not found");
- }
- __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kTwoByteStringTag == 0);
- // (8a) Is the external string one byte? If yes, go to (6).
- __ test_b(ebx, kStringEncodingMask);
- __ j(not_zero, &seq_one_byte_string); // Goto (6).
-
- // eax: sequential subject string (or look-alike, external string)
- // edx: original subject string
- // ecx: RegExp data (FixedArray)
- // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
- __ bind(&seq_two_byte_string);
- // Load previous index and check range before edx is overwritten. We have
- // to use edx instead of eax here because eax might have only been made to
- // look like a sequential string when it is actually an external string.
- __ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ JumpIfNotSmi(ebx, &runtime);
- __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
- __ j(above_equal, &runtime);
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(ecx, Immediate(0)); // Type is two byte.
- __ jmp(&check_code); // Go to (E).
-
- // (10) Not a string or a short external string? If yes, bail out to runtime.
- __ bind(&not_long_external);
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
- __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
- __ j(not_zero, &runtime);
-
- // (11) Sliced string. Replace subject with parent. Go to (5a).
- // Load offset into edi and replace subject string with parent.
- __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
- __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
- __ jmp(&check_underlying); // Go to (5a).
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ mov(ebx, Operand(esp, kPointerSize * 3));
- __ JumpIfNotSmi(ebx, &slowcase);
- __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in ebx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- ebx, // In: Number of elements as a smi
- REGISTER_VALUE_IS_SMI,
- eax, // Out: Start of allocation (tagged).
- ecx, // Out: End of allocation.
- edx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // eax: Start of allocated area, object-tagged.
-
- // Set JSArray map to global.regexp_result_map().
- // Set empty properties FixedArray.
- // Set elements to point to FixedArray allocated right after the JSArray.
- // Interleave operations for better latency.
- __ mov(edx, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
- Factory* factory = masm->isolate()->factory();
- __ mov(ecx, Immediate(factory->empty_fixed_array()));
- __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
- __ mov(edx, FieldOperand(edx, GlobalObject::kNativeContextOffset));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
- __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
-
- // Set input, index and length fields from arguments.
- __ mov(ecx, Operand(esp, kPointerSize * 1));
- __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
- __ mov(ecx, Operand(esp, kPointerSize * 2));
- __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
- __ mov(ecx, Operand(esp, kPointerSize * 3));
- __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
-
- // Fill out the elements FixedArray.
- // eax: JSArray.
- // ebx: FixedArray.
- // ecx: Number of elements in array, as smi.
-
- // Set map.
- __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(factory->fixed_array_map()));
- // Set length.
- __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
- // Fill contents of fixed-array with undefined.
- __ SmiUntag(ecx);
- __ mov(edx, Immediate(factory->undefined_value()));
- __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
- // Fill fixed array elements with undefined.
- // eax: JSArray.
- // ecx: Number of elements to fill.
- // ebx: Start of elements in FixedArray.
- // edx: undefined.
- Label loop;
- __ test(ecx, ecx);
- __ bind(&loop);
- __ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero.
- __ sub(ecx, Immediate(1));
- __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
- __ jmp(&loop);
-
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
- __ mov(number_string_cache,
- Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
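- // A hedged C sketch of that hash (low_word/high_word are illustrative
- // helpers, not V8 API):
- //   uint32_t hash = is_smi ? (uint32_t)smi_value
- //                          : low_word(double) ^ high_word(double);
- //   index = hash & mask;  // mask = (cache length / 2) - 1, built above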
- Label smi_hash_calculated;
- Label load_result_from_cache;
- if (object_is_smi) {
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- } else {
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(object, &not_smi, Label::kNear);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated, Label::kNear);
- __ bind(&not_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache, Label::kNear);
- }
-
- __ bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ mov(ebx, Operand(esp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-
-static void CheckInputType(MacroAssembler* masm,
- Register input,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ cmp(FieldOperand(input, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(not_equal, fail);
- }
- // We could be strict about internalized/non-internalized here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-static void BranchIfNotInternalizedString(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsInternalizedMask | kIsNotStringMask);
- __ cmp(scratch, kInternalizedTag | kStringTag);
- __ j(not_equal, label);
-}
-
-
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
- Label check_unequal_objects;
- Condition cc = GetCondition();
-
- Label miss;
- CheckInputType(masm, edx, left_, &miss);
- CheckInputType(masm, eax, right_, &miss);
-
- // Compare two smis.
- Label non_smi, smi_done;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, eax); // Return on the result of the subtraction.
- __ j(no_overflow, &smi_done, Label::kNear);
- __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
- __ bind(&smi_done);
- __ mov(eax, edx);
- __ ret(0);
- __ bind(&non_smi);
-
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- {
- Label not_user_equal, user_equal;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &not_user_equal);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &not_user_equal);
-
- __ CmpObjectType(eax, JS_OBJECT_TYPE, ebx);
- __ j(not_equal, &not_user_equal);
-
- __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &not_user_equal);
-
- __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &user_equal);
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &user_equal);
-
- __ jmp(&not_user_equal);
-
- __ bind(&user_equal);
-
- __ pop(ebx); // Return address.
- __ push(eax);
- __ push(edx);
- __ push(ebx);
- __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
- __ bind(&not_user_equal);
- }
-
- // Identical objects can be compared fast, but there are some tricky cases
- // for NaN and undefined.
- {
- Label not_identical;
- __ cmp(eax, edx);
- __ j(not_equal, &not_identical);
-
- if (cc != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- Label check_for_nan;
- __ cmp(edx, masm->isolate()->factory()->undefined_value());
- __ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
- // so we do the second-best thing: we test it ourselves.
- Label heap_number;
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(equal, &heap_number, Label::kNear);
- if (cc != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
- }
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ Set(eax, Immediate(0));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
- // bits.
- __ add(edx, edx);
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- if (cc == equal) {
- STATIC_ASSERT(EQUAL != 1);
- __ setcc(above_equal, eax);
- __ ret(0);
- } else {
- Label nan;
- __ j(above_equal, &nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- __ bind(&nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- __ ret(0);
- }
-
- __ bind(&not_identical);
- }
-
- // Strict equality can quickly decide whether objects are equal.
- // Non-strict object equality is slower, so it is handled later in the stub.
- if (cc == equal && strict()) {
- Label slow; // Fallthrough label.
- Label not_smis;
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- __ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, eax);
- __ test(ecx, edx);
- __ j(not_zero, &not_smis, Label::kNear);
- // One operand is a smi.
-
- // Check whether the non-smi is a heap number.
- STATIC_ASSERT(kSmiTagMask == 1);
- // ecx still holds eax & kSmiTagMask, which is either zero or one.
- __ sub(ecx, Immediate(0x01));
- __ mov(ebx, edx);
- __ xor_(ebx, eax);
- __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, eax);
- // if eax was smi, ebx is now edx, else eax.
-
- // Check if the non-smi operand is a heap number.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- // If heap number, handle it in the slow case.
- __ j(equal, &slow, Label::kNear);
- // Return non-equal (ebx is not zero)
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&not_smis);
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different.
- // There is no test for undetectability in strict equality.
-
- // Get the type of the first operand.
- // If the first object is a JS object, we have done pointer comparison.
- Label first_non_object;
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &first_non_object, Label::kNear);
-
- // Return non-zero (eax is not zero)
- Label return_not_equal;
- STATIC_ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(ecx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- __ bind(&slow);
- }
-
- // Generate the number comparison code.
- Label non_number_comparison;
- Label unordered;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- CpuFeatures::Scope use_cmov(CMOV);
-
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
-
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, Label::kNear);
- __ j(above, &above_label, Label::kNear);
-
- __ Set(eax, Immediate(0));
- __ ret(0);
-
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
-
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
- }
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc != not_equal);
- if (cc == less || cc == less_equal) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
-
- // Fast negative check for internalized-to-internalized equality.
- Label check_for_strings;
- if (cc == equal) {
- BranchIfNotInternalizedString(masm, &check_for_strings, eax, ecx);
- BranchIfNotInternalizedString(masm, &check_for_strings, edx, ecx);
-
- // We've already checked for object identity, so if both operands
- // are internalized they aren't equal. Register eax already holds a
- // non-zero value, which indicates not equal, so just return.
- __ ret(0);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
- &check_unequal_objects);
-
- // Inline comparison of ASCII strings.
- if (cc == equal) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- edx,
- eax,
- ecx,
- ebx);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- edx,
- eax,
- ecx,
- ebx,
- edi);
- }
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc == equal && !strict()) {
- // Non-strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- Label not_both_objects;
- Label return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(ecx, Operand(eax, edx, times_1, 0));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects, Label::kNear);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(below, &not_both_objects, Label::kNear);
- __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
- __ j(below, &not_both_objects, Label::kNear);
- // We do not bail out after this point. Both are JSObjects, and
- // they are equal if and only if both are undetectable.
- // The and of the undetectable flags is 1 if and only if they are equal.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal, Label::kNear);
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal, Label::kNear);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(eax, Immediate(EQUAL));
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
- // or return equal if we fell through to here.
- __ ret(0);
- __ bind(&not_both_objects);
- }
-
- // Push arguments below the return address.
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc == equal) {
- builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- builtin = Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- }
-
- // Restore return address on the stack.
- __ push(ecx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
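-
- // For intuition: the unordered path above implements the rule that any
- // comparison involving NaN must evaluate to false. A rough sketch of the
- // value it returns (illustrative only; assumes the -1/0/1 contract of the
- // COMPARE builtin):
- //
- //   static int UnorderedResult(Condition cc) {
- //     // For less/less_equal return +1 so that "a < b" / "a <= b" fail;
- //     // otherwise return -1 so that "a > b" / "a >= b" fail.
- //     return (cc == less || cc == less_equal) ? 1 : -1;
- //   }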
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // ebx : cache cell for call target
- // edi : the function to call
- ASSERT(!FLAG_optimize_constructed_arrays);
- Isolate* isolate = masm->isolate();
- Label initialize, done;
-
- // Load the cache state into ecx.
- __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(ecx, edi);
- __ j(equal, &done, Label::kNear);
- __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ j(equal, &done, Label::kNear);
-
- // A monomorphic miss (i.e., the cache is not uninitialized) goes
- // megamorphic.
- __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
- __ j(equal, &initialize, Label::kNear);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ jmp(&done, Label::kNear);
-
- // An uninitialized cache is patched with the function.
- __ bind(&initialize);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
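-
- // The cache cell above behaves as a three-state machine. A hedged C++
- // sketch of the transition function (UNINITIALIZED and MEGAMORPHIC stand
- // in for the sentinel values; they are not actual V8 identifiers):
- //
- //   static Object* Transition(Object* state, JSFunction* callee) {
- //     if (state == callee || state == MEGAMORPHIC) return state;  // stable
- //     if (state == UNINITIALIZED) return callee;  // become monomorphic
- //     return MEGAMORPHIC;  // monomorphic miss: generalize
- //   }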
-
-
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // ebx : cache cell for call target
- // edi : the function to call
- ASSERT(FLAG_optimize_constructed_arrays);
- Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function;
-
- // Load the cache state into ecx.
- __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmp(ecx, edi);
- __ j(equal, &done);
- __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ j(equal, &done);
-
- // Special handling of the Array() function, which caches not only the
- // monomorphic Array function but the initial ElementsKind with special
- // sentinels.
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- LAST_FAST_ELEMENTS_KIND);
- __ cmp(ecx, Immediate(terminal_kind_sentinel));
- __ j(above, &miss);
- // Load the global or builtins object from the current context
- __ LoadGlobalContext(ecx);
- // Make sure the function is the Array() function
- __ cmp(edi, Operand(ecx,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
-
- __ bind(&miss);
-
- // A monomorphic miss (i.e., the cache is not uninitialized) goes
- // megamorphic.
- __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
- __ j(equal, &initialize);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ bind(&megamorphic);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- __ jmp(&done, Label::kNear);
-
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
- __ bind(&initialize);
- __ LoadGlobalContext(ecx);
- // Make sure the function is the Array() function
- __ cmp(edi, Operand(ecx,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor. Install a sentinel value in
- // the constructor's type info cell that will track the initial ElementsKind
- // that should be used for the array when it's constructed.
- Handle<Object> initial_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- GetInitialFastElementsKind());
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(initial_kind_sentinel));
- __ jmp(&done);
-
- __ bind(&not_array_function);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
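-
- // A hedged sketch of the extended state machine above, which additionally
- // tracks an ElementsKind sentinel for the Array constructor (ARRAY_FN and
- // the sentinel helpers are illustrative, not actual V8 identifiers):
- //
- //   static Object* Transition(Object* state, JSFunction* callee) {
- //     if (state == callee || state == MEGAMORPHIC) return state;
- //     if (IsKindSentinel(state)) {  // i.e. not "above" the terminal sentinel
- //       return (callee == ARRAY_FN) ? state : MEGAMORPHIC;
- //     }
- //     if (state != UNINITIALIZED) return MEGAMORPHIC;
- //     return (callee == ARRAY_FN) ? InitialKindSentinel() : callee;
- //   }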
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // ebx : cache cell for call target
- // edi : the function to call
- Isolate* isolate = masm->isolate();
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label receiver_ok;
- // Get the receiver from the stack.
- // +1 ~ return address
- __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
- // Call as function is indicated with the hole.
- __ cmp(eax, isolate->factory()->the_hole_value());
- __ j(not_equal, &receiver_ok, Label::kNear);
- // Patch the receiver on the stack with the global receiver object.
- __ mov(ecx, GlobalObjectOperand());
- __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
- __ bind(&receiver_ok);
- }
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &non_function);
- // Go to the slow case if we do not have a function.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ cmp(eax, isolate->factory()->the_hole_value());
- __ j(equal, &call_as_function);
- __ InvokeFunction(edi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(edi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
- }
- // Check for function proxy.
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function);
- __ pop(ecx);
- __ push(edi); // Put proxy as an additional argument under return address.
- __ push(ecx);
- __ Set(eax, Immediate(argc_ + 1));
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_FUNCTION);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
- __ Set(eax, Immediate(argc_));
- __ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-void CallConstructStub::Generate(MacroAssembler* masm) {
- // eax : number of arguments
- // ebx : cache cell for call target
- // edi : constructor function
- Label slow, non_function_call;
-
- // Check that function is not a smi.
- __ JumpIfSmi(edi, &non_function_call);
- // Check that function is a JSFunction.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Jump to the function-specific construct stub.
- Register jmp_reg = FLAG_optimize_constructed_arrays ? ecx : ebx;
- __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(jmp_reg, FieldOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
- __ jmp(jmp_reg);
-
- // edi: called object
- // eax: number of arguments
- // ecx: object map
- Label do_call;
- __ bind(&slow);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function_call);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing eax).
- __ Set(ebx, Immediate(0));
- Handle<Code> arguments_adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ SetCallKind(ecx, CALL_AS_METHOD);
- __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-bool CEntryStub::NeedsImmovableCode() {
- return false;
-}
-
-
-bool CEntryStub::IsPregenerated() {
- return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- CEntryStub::GenerateAheadOfTime(isolate);
- // It is important that the store buffer overflow stubs are generated first.
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
-}
-
-
-void CodeStub::GenerateFPStubs(Isolate* isolate) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CEntryStub save_doubles(1, kSaveFPRegs);
- // Stubs might already be in the snapshot; detect that and don't regenerate,
- // since regenerating would corrupt the code stub initialization state.
- Code* save_doubles_code;
- if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
- save_doubles_code = *(save_doubles.GetCode(isolate));
- }
- save_doubles_code->set_is_pregenerated(true);
- isolate->set_fp_stubs_generated(true);
- }
-}
-
-
-void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
-}
-
-
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- __ mov(scratch, value);
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, 0xf);
- __ cmp(scratch, 0xf);
- __ j(equal, oom_label);
-}
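-
- // JumpIfOOM relies on the failure encoding asserted above: the failure tag
- // occupies the low two bits (kFailureTag == 3) and OUT_OF_MEMORY_EXCEPTION
- // (== 3) the next two, so an OOM failure is exactly a value whose low
- // nibble is all ones. An equivalent C++ sketch:
- //
- //   static bool IsOutOfMemoryFailure(int32_t value) {
- //     return (value & 0xf) == 0xf;  // failure tag 0b11 + OOM type 0b11
- //   }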
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope) {
- // eax: result parameter for PerformGC, if any
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: pointer to the first argument (C callee-saved)
-
- // Result returned in eax, or eax+edx if result_size_ is 2.
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack alignment is known to be correct. This function takes one argument
- // which is passed on the stack, and we know that the stack has been
- // prepared to pass at least one argument.
- __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
- __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
- if (always_allocate_scope) {
- __ inc(Operand::StaticVariable(scope_depth));
- }
-
- // Call C function.
- __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- __ call(ebx);
- // Result is in eax or edx:eax - do not destroy these registers!
-
- if (always_allocate_scope) {
- __ dec(Operand::StaticVariable(scope_depth));
- }
-
- // Make sure we're not trying to return 'the hole' from the runtime
- // call as this may lead to crashes in the IC code later.
- if (FLAG_debug_code) {
- Label okay;
- __ cmp(eax, masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, &okay, Label::kNear);
- __ int3();
- __ bind(&okay);
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ lea(ecx, Operand(eax, 1));
- // Lower 2 bits of ecx are 0 iff eax has failure tag.
- __ test(ecx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
-
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, masm->isolate());
-
- // Check that there is no pending exception, otherwise we
- // should have returned some failure value.
- if (FLAG_debug_code) {
- __ push(edx);
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
- Label okay;
- __ cmp(edx, Operand::StaticVariable(pending_exception_address));
- // Cannot use a check here, as it would generate a call into the runtime.
- __ j(equal, &okay, Label::kNear);
- __ int3();
- __ bind(&okay);
- __ pop(edx);
- }
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
- __ ret(0);
-
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, Label::kNear);
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- __ mov(eax, Operand::StaticVariable(pending_exception_address));
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
- __ mov(Operand::StaticVariable(pending_exception_address), edx);
-
- // Special handling of termination exceptions, which are uncatchable
- // by JavaScript code.
- __ cmp(eax, masm->isolate()->factory()->termination_exception());
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
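-
- // The failure check in GenerateCore exploits the same tag layout: failure
- // objects are the only values whose low kFailureTagSize bits are all ones,
- // so adding one clears exactly those bits. A sketch of the lea/test pair:
- //
- //   static bool IsFailure(int32_t value) {
- //     // lea ecx, [eax + 1]; test ecx, kFailureTagMask
- //     return ((value + 1) & kFailureTagMask) == 0;
- //   }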
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // eax: number of arguments including receiver
- // ebx: pointer to C function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // esi: current context (C callee-saved)
- // edi: JS function of the caller (C callee-saved)
-
- // NOTE: Invocations of builtins may return failure objects instead
- // of a proper result. The builtin entry handles this by performing
- // a garbage collection and retrying the builtin (twice).
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
-
- // eax: result parameter for PerformGC, if any (set up below)
- // ebx: pointer to builtin function (C callee-saved)
- // ebp: frame pointer (restored after C call)
- // esp: stack pointer (restored after C call)
- // edi: number of arguments including receiver (C callee-saved)
- // esi: argv pointer (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(Operand::StaticVariable(external_caught), Immediate(false));
-
- // Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- Label already_have_failure;
- JumpIfOOM(masm, eax, ecx, &already_have_failure);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException(0x1)));
- __ bind(&already_have_failure);
- __ mov(Operand::StaticVariable(pending_exception), eax);
- // Fall through to the next label.
-
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(eax);
-
- __ bind(&throw_normal_exception);
- __ Throw(eax);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, handler_entry, exit;
- Label not_outermost_js, not_outermost_js_2;
-
- // Set up frame.
- __ push(ebp);
- __ mov(ebp, esp);
-
- // Push marker in two places.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ push(Immediate(Smi::FromInt(marker))); // context slot
- __ push(Immediate(Smi::FromInt(marker))); // function slot
- // Save callee-saved registers (C calling conventions).
- __ push(edi);
- __ push(esi);
- __ push(ebx);
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, masm->isolate());
- __ push(Operand::StaticVariable(c_entry_fp));
-
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
- masm->isolate());
- __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ j(not_equal, &not_outermost_js, Label::kNear);
- __ mov(Operand::StaticVariable(js_entry_sp), ebp);
- __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ jmp(&invoke, Label::kNear);
- __ bind(&not_outermost_js);
- __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- masm->isolate());
- __ mov(Operand::StaticVariable(pending_exception), eax);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
- __ bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
-
- // Clear any pending exceptions.
- __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline builtin and
- // pop the faked function when we return. Notice that we cannot store a
- // reference to the trampoline code directly in this stub, because the
- // builtin stubs may not have been generated yet.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- masm->isolate());
- __ mov(edx, Immediate(construct_entry));
- } else {
- ExternalReference entry(Builtins::kJSEntryTrampoline,
- masm->isolate());
- __ mov(edx, Immediate(entry));
- }
- __ mov(edx, Operand(edx, 0)); // deref address
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ call(edx);
-
- // Unlink this frame from the handler chain.
- __ PopTryHandler();
-
- __ bind(&exit);
- // Check if the current stack frame is marked as the outermost JS frame.
- __ pop(ebx);
- __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ j(not_equal, &not_outermost_js_2);
- __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ bind(&not_outermost_js_2);
-
- // Restore the top frame descriptor from the stack.
- __ pop(Operand::StaticVariable(ExternalReference(
- Isolate::kCEntryFPAddress,
- masm->isolate())));
-
- // Restore callee-saved registers (C calling conventions).
- __ pop(ebx);
- __ pop(esi);
- __ pop(edi);
- __ add(esp, Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(ebp);
- __ ret(0);
-}
-
-
-// Generate stub code for instanceof.
- // This code can patch a call site's inlined cache for the instanceof check,
- // which looks like this.
-//
- // 3b 3d XX XX XX XX cmp edi, [<cell: the hole, patched to a map>]
-// 75 0a jne <some near label>
-// b8 XX XX XX XX mov eax, <the hole, patched to either true or false>
-//
- // If call site patching is requested, the stack will have the delta from the
- // return address to the cmp instruction just below the return address. This
- // also means that call site patching can only take place with arguments in
- // registers. TOS looks like this when call site patching is requested:
-//
-// esp[0] : return address
-// esp[4] : delta from return address to cmp instruction
-//
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub.
- Register object = eax; // Object (lhs).
- Register map = ebx; // Map of the object.
- Register function = edx; // Function (rhs).
- Register prototype = edi; // Prototype of the function.
- Register scratch = ecx;
-
- // Constants describing the call site code to patch.
- static const int kDeltaToCmpImmediate = 2;
- static const int kDeltaToMov = 8;
- static const int kDeltaToMovImmediate = 9;
- static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
- static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
- static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
-
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
-
- ASSERT_EQ(object.code(), InstanceofStub::left().code());
- ASSERT_EQ(function.code(), InstanceofStub::right().code());
-
- // Get the object and function - they are always both needed.
- Label slow, not_js_object;
- if (!HasArgsInRegisters()) {
- __ mov(object, Operand(esp, 2 * kPointerSize));
- __ mov(function, Operand(esp, 1 * kPointerSize));
- }
-
- // Check that the left-hand side is a JS object.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ cmp(function, Operand::StaticArray(scratch,
- times_pointer_size,
- roots_array_start));
- __ j(not_equal, &miss, Label::kNear);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ cmp(map, Operand::StaticArray(
- scratch, times_pointer_size, roots_array_start));
- __ j(not_equal, &miss, Label::kNear);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(eax, Operand::StaticArray(
- scratch, times_pointer_size, roots_array_start));
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
-
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
- map);
- __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
- __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
- function);
- } else {
- // The constants for the code patching are based on no push instructions
- // at the call site.
- ASSERT(HasArgsInRegisters());
- // Get return address and delta to inlined map check.
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
- __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
- __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
- __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
- }
- __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
- __ mov(Operand(scratch, 0), map);
- }
-
- // Loop through the prototype chain of the object looking for the function
- // prototype.
- __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
- Label loop, is_instance, is_not_instance;
- __ bind(&loop);
- __ cmp(scratch, prototype);
- __ j(equal, &is_instance, Label::kNear);
- Factory* factory = masm->isolate()->factory();
- __ cmp(scratch, Immediate(factory->null_value()));
- __ j(equal, &is_not_instance, Label::kNear);
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ Set(eax, Immediate(0));
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(scratch,
- times_pointer_size, roots_array_start), eax);
- } else {
- // Get return address and delta to inlined map check.
- __ mov(eax, factory->true_value());
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
- }
- __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
- if (!ReturnTrueFalseObject()) {
- __ Set(eax, Immediate(0));
- }
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
- __ mov(Operand::StaticArray(
- scratch, times_pointer_size, roots_array_start), eax);
- } else {
- // Get return address and delta to inlined map check.
- __ mov(eax, factory->false_value());
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
- }
- __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
- if (!ReturnTrueFalseObject()) {
- __ Set(eax, Immediate(Smi::FromInt(1)));
- }
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before the null, smi and string value checks, check that the rhs is a
- // function; for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow, Label::kNear);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
- __ j(not_equal, &slow, Label::kNear);
-
- // Null is not an instance of anything.
- __ cmp(object, factory->null_value());
- __ j(not_equal, &object_not_null, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&object_not_null);
- // A smi value is not an instance of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
- __ j(NegateCondition(is_string), &slow, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(1)));
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- // Tail call the builtin which returns 0 or 1.
- if (HasArgsInRegisters()) {
- // Push arguments below return address.
- __ pop(scratch);
- __ push(object);
- __ push(function);
- __ push(scratch);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- // Call the builtin and convert 0/1 to true/false.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(object);
- __ push(function);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- Label true_value, done;
- __ test(eax, eax);
- __ j(zero, &true_value, Label::kNear);
- __ mov(eax, factory->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(eax, factory->true_value());
- __ bind(&done);
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
- }
-}
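-
- // The core of the stub is the prototype-chain walk. Its behaviour matches
- // this C++ sketch (accessors simplified for illustration):
- //
- //   static bool IsInstance(Map* map, Object* prototype, Object* null_value) {
- //     for (Object* p = map->prototype(); p != null_value;
- //          p = HeapObject::cast(p)->map()->prototype()) {
- //       if (p == prototype) return true;
- //     }
- //     return false;
- //   }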
-
-
-Register InstanceofStub::left() { return eax; }
-
-
-Register InstanceofStub::right() { return edx; }
-
-
-// -------------------------------------------------------------------------
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- // If the receiver is a smi, trigger the non-string case.
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string, trigger the non-string case.
- __ test(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
-
- // If the index is not a smi, trigger the non-smi case.
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(index_, &index_not_smi_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
- __ j(above_equal, index_out_of_range_);
-
- __ SmiUntag(index_);
-
- Factory* factory = masm->isolate()->factory();
- StringCharLoadGenerator::Generate(
- masm, factory, object_, index_, result_, &call_runtime_);
-
- __ SmiTag(result_);
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- masm->isolate()->factory()->heap_number_map(),
- index_not_number_,
- DONT_DO_SMI_CHECK);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- if (!index_.is(eax)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ mov(index_, eax);
- }
- __ pop(object_);
- // Reload the instance type.
- __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(index_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code for getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ SmiTag(index_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
- __ test(code_,
- Immediate(kSmiTagMask |
- ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case_);
-
- Factory* factory = masm->isolate()->factory();
- __ Set(result_, Immediate(factory->single_character_string_cache()));
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiShiftSize == 0);
- // At this point the code register contains a smi-tagged ASCII char code.
- __ mov(result_, FieldOperand(result_,
- code_, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result_, factory->undefined_value());
- __ j(equal, &slow_case_);
- __ bind(&exit_);
-}
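-
- // The single test at the top of GenerateFast checks two conditions at once.
- // A hedged sketch of what it computes on the tagged value (kSmiTagSize is
- // 1 on ia32):
- //
- //   static bool IsOneByteCharCodeSmi(int32_t tagged) {
- //     // The low bit must be the smi tag (0) and the untagged value must
- //     // fit in 0..kMaxOneByteCharCode, i.e. no higher bits may be set.
- //     int32_t mask = kSmiTagMask | (~String::kMaxOneByteCharCode << 1);
- //     return (tagged & mask) == 0;
- //   }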
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(eax)) {
- __ mov(result_, eax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- // Load the two arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
-
- // Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfSmi(eax, &call_runtime);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &call_runtime);
-
- // First argument is a string; test the second.
- __ JumpIfSmi(edx, &call_runtime);
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
- }
-
- // Both arguments are strings.
- // eax: first string
- // edx: second string
- // Check if either of the strings is empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ecx, ecx);
- __ j(not_zero, &second_not_zero_length, Label::kNear);
- // Second string is empty, result is first string which is already in eax.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ test(ebx, ebx);
- __ j(not_zero, &both_not_zero_length, Label::kNear);
- // First string is empty, result is second string which is in edx.
- __ mov(eax, edx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // eax: first string
- // ebx: length of first string as a smi
- // ecx: length of second string as a smi
- // edx: second string
- // Look at the length of the result of adding the two strings.
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
- __ add(ebx, ecx);
- STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
- // Handle exceptionally long strings in the runtime system.
- __ j(overflow, &call_runtime);
- // Use the string table when adding two one-character strings, as it
- // helps later optimizations to return an internalized string here.
- __ cmp(ebx, Immediate(Smi::FromInt(2)));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
-
- // Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
-
- // Try to look up the two-character string in the string table. If it is
- // not found, just allocate a new one.
- Label make_two_character_string, make_two_character_string_no_reload;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, ebx, ecx, eax, edx, edi,
- &make_two_character_string_no_reload, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Allocate a two character string.
- __ bind(&make_two_character_string);
- // Reload the arguments.
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
- // Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
- __ bind(&make_two_character_string_no_reload);
- __ IncrementCounter(counters->string_add_make_two_char(), 1);
- __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
- // Pack both characters in ebx.
- __ shl(ecx, kBitsPerByte);
- __ or_(ebx, ecx);
- // Set the characters in the new string.
- __ mov_w(FieldOperand(eax, SeqOneByteString::kHeaderSize), ebx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
- __ j(below, &string_add_flat_result);
-
- // If the result is not supposed to be flat, allocate a cons string object.
- // If both strings are ASCII, the result is an ASCII cons string.
- Label non_ascii, allocated, ascii_data;
- __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ and_(ecx, edi);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(ecx, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ASCII cons string.
- __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ AssertSmi(ebx);
- __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
- __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
- __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
- __ mov(eax, ecx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
- // ecx: first instance type AND second instance type.
- // edi: second instance type.
- __ test(ecx, Immediate(kAsciiDataHintMask));
- __ j(not_zero, &ascii_data);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ xor_(edi, ecx);
- STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
- __ and_(edi, kOneByteStringTag | kAsciiDataHintTag);
- __ cmp(edi, kOneByteStringTag | kAsciiDataHintTag);
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- Label first_prepared, second_prepared;
- Label first_is_sequential, second_is_sequential;
- __ bind(&string_add_flat_result);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- // ecx: instance type of first string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(ecx, kStringRepresentationMask);
- __ j(zero, &first_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(ecx, kShortExternalStringMask);
- __ j(not_zero, &call_runtime);
- __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ jmp(&first_prepared, Label::kNear);
- __ bind(&first_is_sequential);
- __ add(eax, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ bind(&first_prepared);
-
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- // Check whether both strings have the same encoding.
- // edi: instance type of second string
- __ xor_(ecx, edi);
- __ test_b(ecx, kStringEncodingMask);
- __ j(not_zero, &call_runtime);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(edi, kStringRepresentationMask);
- __ j(zero, &second_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ test_b(edi, kShortExternalStringMask);
- __ j(not_zero, &call_runtime);
- __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ jmp(&second_prepared, Label::kNear);
- __ bind(&second_is_sequential);
- __ add(edx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ bind(&second_prepared);
-
- // Push the addresses of both strings' first characters onto the stack.
- __ push(edx);
- __ push(eax);
-
- Label non_ascii_string_add_flat_result, call_runtime_drop_two;
- // edi: instance type of second string
- // First string and second string have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ test_b(edi, kStringEncodingMask);
- __ j(zero, &non_ascii_string_add_flat_result);
-
- // Both strings are ASCII strings.
- // ebx: length of resulting flat string as a smi
- __ SmiUntag(ebx);
- __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(ecx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // Load first argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- // Load second argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Handle creating a flat two byte result.
- // eax: first string - known to be two byte
- // ebx: length of resulting flat string as a smi
- // edx: second string
- __ bind(&non_ascii_string_add_flat_result);
- // Both strings are two byte strings.
- __ SmiUntag(ebx);
- __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
- // eax: result string
- __ mov(ecx, eax);
- // Locate first character of result.
- __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load first argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: first character of result
- // edx: first char of first argument
- // edi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- // Load second argument's length and first character location. Account for
- // values currently on the stack when fetching arguments from it.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(edx, String::kLengthOffset));
- __ SmiUntag(edi);
- __ pop(edx);
- // eax: result string
- // ecx: next character of result
- // edx: first char of second argument
- // edi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Recover stack pointer before jumping to runtime.
- __ bind(&call_runtime_drop_two);
- __ Drop(2);
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
- __ j(below, &done);
-
- // Check the number to string cache.
- Label not_cached;
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- false,
- &not_cached);
- __ mov(arg, scratch1);
- __ mov(Operand(esp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(zero, slow);
- __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ mov(Operand(esp, stack_offset), arg);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(src, Immediate(1));
- __ add(dest, Immediate(1));
- } else {
- __ mov_w(scratch, Operand(src, 0));
- __ mov_w(Operand(dest, 0), scratch);
- __ add(src, Immediate(2));
- __ add(dest, Immediate(2));
- }
- __ sub(count, Immediate(1));
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
- // Copy characters using rep movs of doublewords.
- // The destination is aligned on a 4 byte boundary because we are
- // copying to the beginning of a newly allocated string.
- ASSERT(dest.is(edi)); // rep movs destination
- ASSERT(src.is(esi)); // rep movs source
- ASSERT(count.is(ecx)); // rep movs count
- ASSERT(!scratch.is(dest));
- ASSERT(!scratch.is(src));
- ASSERT(!scratch.is(count));
-
- // Nothing to do for zero characters.
- Label done;
- __ test(count, count);
- __ j(zero, &done);
-
- // Make count the number of bytes to copy.
- if (!ascii) {
- __ shl(count, 1);
- }
-
- // Don't enter the rep movs if there are fewer than 4 bytes to copy.
- Label last_bytes;
- __ test(count, Immediate(~3));
- __ j(zero, &last_bytes, Label::kNear);
-
- // Copy from esi to edi using the rep movs instruction.
- __ mov(scratch, count);
- __ sar(count, 2); // Number of doublewords to copy.
- __ cld();
- __ rep_movs();
-
- // Find number of bytes left.
- __ mov(count, scratch);
- __ and_(count, 3);
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ test(count, count);
- __ j(zero, &done);
-
- // Copy remaining characters.
- Label loop;
- __ bind(&loop);
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ add(src, Immediate(1));
- __ add(dest, Immediate(1));
- __ sub(count, Immediate(1));
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
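-
- // GenerateCopyCharactersREP copies in two phases: doublewords via rep movs,
- // then a byte tail. Roughly, as a C++ sketch:
- //
- //   static void CopyChars(uint8_t* dst, const uint8_t* src,
- //                         int chars, bool ascii) {
- //     int bytes = ascii ? chars : 2 * chars;  // shl count, 1 for two-byte
- //     memcpy(dst, src, bytes & ~3);           // rep movs, bytes / 4 dwords
- //     for (int i = bytes & ~3; i < bytes; i++) dst[i] = src[i];  // tail
- //   }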
-
-
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_probed,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that neither character is a digit, as such strings have a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ mov(scratch, c1);
- __ sub(scratch, Immediate(static_cast<int>('0')));
- __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index, Label::kNear);
- __ mov(scratch, c2);
- __ sub(scratch, Immediate(static_cast<int>('0')));
- __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_probed);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, kBitsPerByte);
- __ or_(chars, c2);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the string table.
- Register string_table = c2;
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kStringTableRootIndex));
- __ mov(string_table,
- Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ mov(mask, FieldOperand(string_table, StringTable::kCapacityOffset));
- __ SmiUntag(mask);
- __ sub(mask, Immediate(1));
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string
- // string_table: string table
- // mask: capacity mask
- // scratch: -
-
- // Perform a number of probes in the string table.
- static const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes], next_probe_pop_mask[kProbes];
- Register candidate = scratch; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- __ mov(scratch, hash);
- if (i > 0) {
- __ add(scratch, Immediate(StringTable::GetProbeOffset(i)));
- }
- __ and_(scratch, mask);
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ mov(candidate,
- FieldOperand(string_table,
- scratch,
- times_pointer_size,
- StringTable::kElementsStartOffset));
-
- // If the entry is undefined, no string with this hash can be found.
- Factory* factory = masm->isolate()->factory();
- __ cmp(candidate, factory->undefined_value());
- __ j(equal, not_found);
- __ cmp(candidate, factory->the_hole_value());
- __ j(equal, &next_probe[i]);
-
- // If the length is not 2, the string is not a candidate.
- __ cmp(FieldOperand(candidate, String::kLengthOffset),
- Immediate(Smi::FromInt(2)));
- __ j(not_equal, &next_probe[i]);
-
- // As we are out of registers save the mask on the stack and use that
- // register as a temporary.
- __ push(mask);
- Register temp = mask;
-
- // Check that the candidate is a non-external ASCII string.
- __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe_pop_mask[i]);
-
- // Check if the two characters match.
- __ mov(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
- __ and_(temp, 0x0000ffff);
- __ cmp(chars, temp);
- __ j(equal, &found_in_string_table);
- __ bind(&next_probe_pop_mask[i]);
- __ pop(mask);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- __ pop(mask); // Pop saved mask from the stack.
- if (!result.is(eax)) {
- __ mov(eax, result);
- }
-}
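
For readers tracing the register dance above: it reduces to a small amount of index arithmetic. A minimal C++ sketch follows; `probe_offset` is a stand-in for `StringTable::GetProbeOffset(i)`, whose formula is not visible in this file, so the quadratic-probing expression used here is an assumption.

```cpp
#include <cstdint>

// Assumed stand-in for StringTable::GetProbeOffset(i); the exact
// formula may differ in the real string table.
static uint32_t probe_offset(uint32_t i) { return (i + i * i) / 2; }

// Pack the two characters: char 1 in byte 0, char 2 in byte 1,
// mirroring the shl/or_ sequence above.
uint32_t PackTwoChars(uint8_t c1, uint8_t c2) {
  return c1 | (static_cast<uint32_t>(c2) << 8);
}

// Slot index for probe i. Capacity is a power of two, so capacity - 1
// is exactly the mask derived from StringTable::kCapacityOffset above.
uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t capacity) {
  return (hash + probe_offset(i)) & (capacity - 1);
}
```
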
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = (seed + character) + ((seed + character) << 10);
- if (Serializer::enabled()) {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
- __ mov(scratch, Operand::StaticArray(scratch,
- times_pointer_size,
- roots_array_start));
- __ SmiUntag(scratch);
- __ add(scratch, character);
- __ mov(hash, scratch);
- __ shl(scratch, 10);
- __ add(hash, scratch);
- } else {
- int32_t seed = masm->isolate()->heap()->HashSeed();
- __ lea(scratch, Operand(character, seed));
- __ shl(scratch, 10);
- __ lea(hash, Operand(scratch, character, times_1, seed));
- }
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ shr(scratch, 6);
- __ xor_(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ add(hash, character);
- // hash += hash << 10;
- __ mov(scratch, hash);
- __ shl(scratch, 10);
- __ add(hash, scratch);
- // hash ^= hash >> 6;
- __ mov(scratch, hash);
- __ shr(scratch, 6);
- __ xor_(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ mov(scratch, hash);
- __ shl(scratch, 3);
- __ add(hash, scratch);
- // hash ^= hash >> 11;
- __ mov(scratch, hash);
- __ shr(scratch, 11);
- __ xor_(hash, scratch);
- // hash += hash << 15;
- __ mov(scratch, hash);
- __ shl(scratch, 15);
- __ add(hash, scratch);
-
- __ and_(hash, String::kHashBitMask);
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ j(not_zero, &hash_not_zero, Label::kNear);
- __ mov(hash, Immediate(StringHasher::kZeroHash));
- __ bind(&hash_not_zero);
-}
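
Taken together, the three helpers implement V8's running string hash. Below is a scalar rendering of exactly the steps spelled out in the per-line comments above; the literal 27 matches the `kZeroHash` fallback shown in the comment, and `String::kHashBitMask` is passed in as a parameter because its value is not visible here.

```cpp
#include <cstdint>

uint32_t HashInit(uint32_t seed, uint32_t character) {
  uint32_t hash = (seed + character) + ((seed + character) << 10);
  hash ^= hash >> 6;
  return hash;
}

uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
  hash += character;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t HashGetHash(uint32_t hash, uint32_t hash_bit_mask) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= hash_bit_mask;         // String::kHashBitMask
  return hash == 0 ? 27 : hash;  // StringHasher::kZeroHash per the comment
}
```
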
-
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: to
- // esp[8]: from
- // esp[12]: string
-
- // Make sure first argument is a string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
-
- // eax: string
- // ebx: instance type
-
- // Calculate length of sub string using the smi values.
- __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
- __ JumpIfNotSmi(ecx, &runtime);
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
- __ JumpIfNotSmi(edx, &runtime);
- __ sub(ecx, edx);
- __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
- Label not_original_string;
- // Shorter than original string's length: an actual substring.
- __ j(below, &not_original_string, Label::kNear);
- // Longer than original string's length or negative: unsafe arguments.
- __ j(above, &runtime);
- // Return original string.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
- __ bind(&not_original_string);
-
- Label single_char;
- __ cmp(ecx, Immediate(Smi::FromInt(1)));
- __ j(equal, &single_char);
-
- // eax: string
- // ebx: instance type
- // ecx: sub string length (smi)
- // edx: from index (smi)
- // Deal with different string types: update the index if necessary
- // and put the underlying string into edi.
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ test(ebx, Immediate(kIsIndirectStringMask));
- __ j(zero, &seq_or_external_string, Label::kNear);
-
- Factory* factory = masm->isolate()->factory();
- __ test(ebx, Immediate(kSlicedNotConsMask));
- __ j(not_zero, &sliced_string, Label::kNear);
- // Cons string. Check whether it is flat, then fetch first part.
- // Flat cons strings have an empty second part.
- __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
- factory->empty_string());
- __ j(not_equal, &runtime);
- __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
- // Update instance type.
- __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and adjust start index by offset.
- __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
- __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
- // Update instance type.
- __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the expected register.
- __ mov(edi, eax);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // edi: underlying subject string
- // ebx: instance type of underlying subject string
- // edx: adjusted start index (smi)
- // ecx: length (smi)
- __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
- // Short slice. Copy instead of slicing.
- __ j(less, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type (including the string encoding) because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong, because we always have to recheck the
- // encoding of the newly created string's parent anyway, due to
- // externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(ebx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_slice, Label::kNear);
- __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
- __ jmp(&set_slice_header, Label::kNear);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
- __ bind(&set_slice_header);
- __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
- __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
- __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&copy_routine);
- }
-
- // edi: underlying subject string
- // ebx: instance type of underlying subject string
- // edx: adjusted start index (smi)
- // ecx: length (smi)
- // The subject string can only be external or sequential string of either
- // encoding at this point.
- Label two_byte_sequential, runtime_drop_two, sequential_string;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test_b(ebx, kExternalStringTag);
- __ j(zero, &sequential_string);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ test_b(ebx, kShortExternalStringMask);
- __ j(not_zero, &runtime);
- __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&sequential_string);
- // Stash away (adjusted) index and (underlying) string.
- __ push(edx);
- __ push(edi);
- __ SmiUntag(ecx);
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ test_b(ebx, kStringEncodingMask);
- __ j(zero, &two_byte_sequential);
-
- // Sequential ASCII string. Allocate the result.
- __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
-
- // eax: result string
- // ecx: result string length
- __ mov(edx, esi); // esi used by following code.
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ pop(esi);
- __ pop(ebx);
- __ SmiUntag(ebx);
- __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize));
-
- // eax: result string
- // ecx: result length
- // edx: original value of esi
- // edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
- __ mov(esi, edx); // Restore esi.
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- __ bind(&two_byte_sequential);
- // Sequential two-byte string. Allocate the result.
- __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
-
- // eax: result string
- // ecx: result string length
- __ mov(edx, esi); // esi used by following code.
- // Locate first character of result.
- __ mov(edi, eax);
- __ add(edi,
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load string argument and locate character of sub string start.
- __ pop(esi);
- __ pop(ebx);
- // As the from index is a smi it is already 2 times the untagged value,
- // which matches the size of a two-byte character.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
-
- // eax: result string
- // ecx: result length
- // edx: original value of esi
- // edi: first character of result
- // esi: character of sub string start
- StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
- __ mov(esi, edx); // Restore esi.
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(3 * kPointerSize);
-
- // Drop pushed values on the stack before tail call.
- __ bind(&runtime_drop_two);
- __ Drop(2);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-
- __ bind(&single_char);
- // eax: string
- // ebx: instance type
- // ecx: sub string length (smi)
- // edx: from index (smi)
- StringCharAtGenerator generator(
- eax, edx, ecx, eax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ ret(3 * kPointerSize);
- generator.SkipSlow(masm, &runtime);
-}
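
Before touching any characters, the stub above picks one of four strategies. A rough sketch of that decision, with `FLAG_string_slices` and `SlicedString::kMinLength` taken as parameters since their values are not visible in this file (out-of-range or non-smi arguments bail to the runtime before this point):

```cpp
enum class SubstringPath { kReturnOriginal, kSingleChar, kMakeSlice, kCopyChars };

SubstringPath ClassifySubstring(int from, int to, int string_length,
                                bool string_slices_enabled,
                                int min_slice_length /* SlicedString::kMinLength */) {
  int length = to - from;
  if (length == string_length) return SubstringPath::kReturnOriginal;
  if (length == 1) return SubstringPath::kSingleChar;
  if (string_slices_enabled && length >= min_slice_length)
    return SubstringPath::kMakeSlice;  // share the parent's characters
  return SubstringPath::kCopyChars;    // short slice: copying is cheaper
}
```
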
-
-
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
- Register length = scratch1;
-
- // Compare lengths.
- Label strings_not_equal, check_zero_length;
- __ mov(length, FieldOperand(left, String::kLengthOffset));
- __ cmp(length, FieldOperand(right, String::kLengthOffset));
- __ j(equal, &check_zero_length, Label::kNear);
- __ bind(&strings_not_equal);
- __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
- __ ret(0);
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ test(length, length);
- __ j(not_zero, &compare_chars, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- // Compare characters.
- __ bind(&compare_chars);
- GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal, Label::kNear);
-
- // Characters are equal.
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_compare_native(), 1);
-
- // Find minimum length.
- Label left_shorter;
- __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
- __ mov(scratch3, scratch1);
- __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
-
- Register length_delta = scratch3;
-
- __ j(less_equal, &left_shorter, Label::kNear);
- // Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, length_delta);
- __ bind(&left_shorter);
-
- Register min_length = scratch1;
-
- // If either length is zero, just compare lengths.
- Label compare_lengths;
- __ test(min_length, min_length);
- __ j(zero, &compare_lengths, Label::kNear);
-
- // Compare characters.
- Label result_not_equal;
- GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal, Label::kNear);
-
- // Compare lengths - strings up to min-length are equal.
- __ bind(&compare_lengths);
- __ test(length_delta, length_delta);
-#ifndef ENABLE_LATIN_1
- __ j(not_zero, &result_not_equal, Label::kNear);
-#else
- Label length_not_equal;
- __ j(not_zero, &length_not_equal, Label::kNear);
-#endif
-
- // Result is EQUAL.
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- Label result_greater;
-#ifdef ENABLE_LATIN_1
- Label result_less;
- __ bind(&length_not_equal);
- __ j(greater, &result_greater, Label::kNear);
- __ jmp(&result_less, Label::kNear);
-#endif
- __ bind(&result_not_equal);
-#ifndef ENABLE_LATIN_1
- __ j(greater, &result_greater, Label::kNear);
-#else
- __ j(above, &result_greater, Label::kNear);
- __ bind(&result_less);
-#endif
-
- // Result is LESS.
- __ Set(eax, Immediate(Smi::FromInt(LESS)));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Set(eax, Immediate(Smi::FromInt(GREATER)));
- __ ret(0);
-}
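
For reference, the scalar equivalent of the compare above: compare up to the shorter length, then fall back to the length difference. The Latin-1 build compares characters unsigned (`above`), which the `uint8_t` element type captures.

```cpp
#include <cstdint>

int CompareFlatAscii(const uint8_t* left, int left_len,
                     const uint8_t* right, int right_len) {
  int min_length = left_len < right_len ? left_len : right_len;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;  // LESS/GREATER
  }
  if (left_len != right_len) return left_len < right_len ? -1 : 1;
  return 0;  // EQUAL: strings agree up to min length and have equal lengths
}
```
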
-
-
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
- Label::Distance chars_not_equal_near) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiUntag(length);
- __ lea(left,
- FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
- __ lea(right,
- FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ mov_b(scratch, Operand(left, index, times_1, 0));
- __ cmpb(scratch, Operand(right, index, times_1, 0));
- __ j(not_equal, chars_not_equal, chars_not_equal_near);
- __ inc(index);
- __ j(not_zero, &loop);
-}
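
The index trick above is worth spelling out: both string pointers are advanced just past their ends, and the index counts up from -length to 0, so loop termination falls out of the flags set by `inc` with no separate compare. A C++ sketch:

```cpp
#include <cstddef>
#include <cstdint>

bool AsciiCharsEqual(const uint8_t* left, const uint8_t* right, size_t length) {
  // Point just past the end of each string; valid indices are negative.
  const uint8_t* left_end = left + length;
  const uint8_t* right_end = right + length;
  for (ptrdiff_t index = -static_cast<ptrdiff_t>(length); index != 0; ++index) {
    if (left_end[index] != right_end[index]) return false;  // chars_not_equal
  }
  return true;
}
```
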
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: right string
- // esp[8]: left string
-
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
- __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
-
- Label not_same;
- __ cmp(edx, eax);
- __ j(not_equal, &not_same, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_same);
-
- // Check that both objects are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
-
- // Compare flat ASCII strings.
- // Drop arguments from the stack.
- __ pop(ecx);
- __ add(esp, Immediate(2 * kPointerSize));
- __ push(ecx);
- GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
- Label miss;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ JumpIfNotSmi(ecx, &miss, Label::kNear);
-
- if (GetCondition() == equal) {
- // For equality we do not care about the sign of the result.
- __ sub(eax, edx);
- } else {
- Label done;
- __ sub(edx, eax);
- __ j(no_overflow, &done, Label::kNear);
- // Correct sign of result in case of overflow.
- __ not_(edx);
- __ bind(&done);
- __ mov(eax, edx);
- }
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
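
The overflow fix-up in the ordered path deserves a note: only the sign (and zero-ness) of left - right matters to the caller, and when the subtraction overflows, `not edx` (that is, ~diff = -diff - 1) flips the sign bit. Because both operands are tagged smis with the low bit clear, diff is always even, so ~diff can never be zero. A sketch assuming `__builtin_sub_overflow` (GCC/Clang) for the overflow check the flags give the stub for free:

```cpp
#include <cstdint>

// left and right are tagged smis (low bit clear).
int32_t CompareSmisOrdered(int32_t left, int32_t right) {
  int32_t diff;
  if (!__builtin_sub_overflow(left, right, &diff)) return diff;
  return ~diff;  // overflow: corrected sign, non-zero because diff is even
}
```
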
-
-
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::NUMBER);
-
- Label generic_stub;
- Label unordered, maybe_undefined1, maybe_undefined2;
- Label miss;
-
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(edx, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(eax, &miss);
- }
-
- // Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or SSE2 or CMOV is unsupported.
- if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope scope1(SSE2);
- CpuFeatures::Scope scope2(CMOV);
-
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(eax, &right_smi, Label::kNear);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&left, Label::kNear);
- __ bind(&right_smi);
- __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
- __ SmiUntag(ecx);
- __ cvtsi2sd(xmm1, ecx);
-
- __ bind(&left);
- __ JumpIfSmi(edx, &left_smi, Label::kNear);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&left_smi);
- __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
- __ SmiUntag(ecx);
- __ cvtsi2sd(xmm0, ecx);
-
- __ bind(&done);
- // Compare operands.
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
-
- // Return a result of -1, 0, or 1, based on EFLAGS.
- // Use mov rather than xor to zero eax, because xor would clobber EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
-
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- }
-
- __ bind(&unordered);
- __ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
- __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
-
- __ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
- __ j(not_equal, &miss);
- __ JumpIfSmi(edx, &unordered);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- __ jmp(&unordered);
- }
-
- __ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value()));
- __ j(equal, &unordered);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
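
A scalar model of the SSE2 fast path above: `ucomisd` raises the parity flag when either operand is NaN (the unordered jump to the generic stub), and the two `cmov`s otherwise select -1, 0, or 1.

```cpp
// Returns the -1/0/1 result; *unordered signals the NaN fallback.
int CompareNumbers(double left, double right, bool* unordered) {
  *unordered = (left != left) || (right != right);  // parity_even after ucomisd
  if (*unordered) return 0;     // caller jumps to the generic stub instead
  if (left > right) return 1;   // cmov(above)
  if (left < right) return -1;  // cmov(below)
  return 0;                     // equal
}
```
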
-
-
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
- ASSERT(GetCondition() == equal);
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
- Register tmp1 = ecx;
- Register tmp2 = ebx;
-
- // Check that both operands are heap objects.
- Label miss;
- __ mov(tmp1, left);
- STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, right);
- __ JumpIfSmi(tmp1, &miss, Label::kNear);
-
- // Check that both operands are internalized strings.
- __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &miss, Label::kNear);
-
- // Internalized strings are compared by identity.
- Label done;
- __ cmp(left, right);
- // Make sure eax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(eax));
- __ j(not_equal, &done, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ bind(&done);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::UNIQUE_NAME);
- ASSERT(GetCondition() == equal);
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
- Register tmp1 = ecx;
- Register tmp2 = ebx;
-
- // Check that both operands are heap objects.
- Label miss;
- __ mov(tmp1, left);
- STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, right);
- __ JumpIfSmi(tmp1, &miss, Label::kNear);
-
- // Check that both operands are unique names. This leaves the instance
- // types loaded in tmp1 and tmp2.
- STATIC_ASSERT(kInternalizedTag != 0);
- __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
-
- Label succeed1;
- __ test(tmp1, Immediate(kIsInternalizedMask));
- __ j(not_zero, &succeed1);
- __ cmpb(tmp1, static_cast<uint8_t>(SYMBOL_TYPE));
- __ j(not_equal, &miss);
- __ bind(&succeed1);
-
- Label succeed2;
- __ test(tmp2, Immediate(kIsInternalizedMask));
- __ j(not_zero, &succeed2);
- __ cmpb(tmp2, static_cast<uint8_t>(SYMBOL_TYPE));
- __ j(not_equal, &miss);
- __ bind(&succeed2);
-
- // Unique names are compared by identity.
- Label done;
- __ cmp(left, right);
- // Make sure eax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(eax));
- __ j(not_equal, &done, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ bind(&done);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
- Label miss;
-
- bool equality = Token::IsEqualityOp(op_);
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
- Register tmp1 = ecx;
- Register tmp2 = ebx;
- Register tmp3 = edi;
-
- // Check that both operands are heap objects.
- __ mov(tmp1, left);
- STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, right);
- __ JumpIfSmi(tmp1, &miss);
-
- // Check that both operands are strings. This leaves the instance
- // types loaded in tmp1 and tmp2.
- __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- __ mov(tmp3, tmp1);
- STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, tmp2);
- __ test(tmp3, Immediate(kIsNotStringMask));
- __ j(not_zero, &miss);
-
- // Fast check for identical strings.
- Label not_same;
- __ cmp(left, right);
- __ j(not_equal, &not_same, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- // Handle not identical strings.
- __ bind(&not_same);
-
- // Check that both strings are internalized. If they are, we're done
- // because we already know they are not identical. But in the case of a
- // non-equality compare, we still need to determine the order.
- if (equality) {
- Label do_compare;
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &do_compare, Label::kNear);
- // Make sure eax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(eax));
- __ ret(0);
- __ bind(&do_compare);
- }
-
- // Check that both strings are sequential ASCII.
- Label runtime;
- __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
-
- // Compare flat ASCII strings. Returns when done.
- if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3);
- }
-
- // Handle more complex cases in runtime.
- __ bind(&runtime);
- __ pop(tmp1); // Return address.
- __ push(left);
- __ push(right);
- __ push(tmp1);
- if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
- } else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
- Label miss;
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &miss, Label::kNear);
-
- __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &miss, Label::kNear);
- __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &miss, Label::kNear);
-
- ASSERT(GetCondition() == equal);
- __ sub(eax, edx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
- Label miss;
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &miss, Label::kNear);
-
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ecx, known_map_);
- __ j(not_equal, &miss, Label::kNear);
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &miss, Label::kNear);
- __ cmp(ebx, known_map_);
- __ j(not_equal, &miss, Label::kNear);
- __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
- 1 << Map::kUseUserObjectComparison);
- __ j(not_zero, &miss, Label::kNear);
-
- __ sub(eax, edx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- {
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
- masm->isolate());
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx); // Preserve edx and eax.
- __ push(eax);
- __ push(edx); // And also use them as the arguments.
- __ push(eax);
- __ push(Immediate(Smi::FromInt(op_)));
- __ CallExternalReference(miss, 3);
- // Compute the entry point of the rewritten stub.
- __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
- __ pop(eax);
- __ pop(edx);
- }
-
- // Do a tail call to the rewritten stub.
- __ jmp(edi);
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be an internalized string and receiver must be a heap object.
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<String> name,
- Register r0) {
- ASSERT(name->IsInternalizedString());
-
- // If the names of the slots in the range from 1 to kProbes - 1 for the
- // hash value are not equal to the name, and the kProbes-th slot is unused
- // (its name is the undefined value), the hash table is guaranteed not to
- // contain the property. This holds even if some slots represent deleted
- // properties (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r0;
- // Capacity is smi 2^n.
- __ mov(index, FieldOperand(properties, kCapacityOffset));
- __ dec(index);
- __ and_(index,
- Immediate(Smi::FromInt(name->Hash() +
- StringDictionary::GetProbeOffset(i))));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
- Register entity_name = r0;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
- __ j(equal, done);
-
- // Stop if found the property.
- __ cmp(entity_name, Handle<String>(name));
- __ j(equal, miss);
-
- Label the_hole;
- // Check for the hole and skip.
- __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
- __ j(equal, &the_hole, Label::kNear);
-
- // Check if the entry name is not an internalized string.
- __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- kIsInternalizedMask);
- __ j(zero, miss);
- __ bind(&the_hole);
- }
-
- StringDictionaryLookupStub stub(properties,
- r0,
- r0,
- StringDictionaryLookupStub::NEGATIVE_LOOKUP);
- __ push(Immediate(Handle<Object>(name)));
- __ push(Immediate(name->Hash()));
- __ CallStub(&stub);
- __ test(r0, r0);
- __ j(not_zero, miss);
- __ jmp(done);
-}
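
One inlined probe of the loop above, modelled in C++. This follows the in-code comment "(hash + i + i * i) & mask"; the exact `StringDictionary::GetProbeOffset` formula may differ slightly, and the sentinel pointers stand in for the undefined and hole oddballs. Finding undefined corresponds to jumping to `done` (proven absent); finding the name corresponds to `miss` (the property exists after all).

```cpp
#include <cstdint>

enum class ProbeResult { kDefinitelyAbsent, kFoundProperty, kKeepProbing };

ProbeResult NegativeLookupProbe(uint32_t hash, uint32_t i, uint32_t capacity,
                                const void* const* elements,  // key word per entry
                                const void* name,
                                const void* undefined_sentinel,
                                const void* hole_sentinel) {
  uint32_t slot = (hash + i + i * i) & (capacity - 1);
  const void* key = elements[slot * 3];  // StringDictionary::kEntrySize == 3
  if (key == undefined_sentinel) return ProbeResult::kDefinitelyAbsent;  // done
  if (key == name) return ProbeResult::kFoundProperty;                   // miss
  if (key == hole_sentinel) return ProbeResult::kKeepProbing;   // deleted slot
  // A non-internalized key at this slot would force the slow path (miss).
  return ProbeResult::kKeepProbing;
}
```
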
-
-
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r0|. Jump to the |miss| label
-// otherwise.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
- ASSERT(!elements.is(r0));
- ASSERT(!elements.is(r1));
- ASSERT(!name.is(r0));
- ASSERT(!name.is(r1));
-
- __ AssertString(name);
-
- __ mov(r1, FieldOperand(elements, kCapacityOffset));
- __ shr(r1, kSmiTagSize); // convert smi to int
- __ dec(r1);
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
- __ shr(r0, String::kHashShift);
- if (i > 0) {
- __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
- }
- __ and_(r0, r1);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
-
- // Check if the key is identical to the name.
- __ cmp(name, Operand(elements,
- r0,
- times_4,
- kElementsStartOffset - kHeapObjectTag));
- __ j(equal, done);
- }
-
- StringDictionaryLookupStub stub(elements,
- r1,
- r0,
- POSITIVE_LOOKUP);
- __ push(name);
- __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
- __ shr(r0, String::kHashShift);
- __ push(r0);
- __ CallStub(&stub);
-
- __ test(r1, r1);
- __ j(zero, miss);
- __ jmp(done);
-}
-
-
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Stack frame on entry:
- // esp[0 * kPointerSize]: return address.
- // esp[1 * kPointerSize]: key's hash.
- // esp[2 * kPointerSize]: key.
- // Registers:
- // dictionary_: StringDictionary to probe.
- // result_: used as scratch.
- // index_: will hold the index of the entry if the lookup is
- // successful. May alias with result_.
- // Returns:
- // result_ is zero if the lookup failed, non-zero otherwise.
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- Register scratch = result_;
-
- __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
- __ dec(scratch);
- __ SmiUntag(scratch);
- __ push(scratch);
-
- // If the names of the slots in the range from 1 to kProbes - 1 for the
- // hash value are not equal to the name, and the kProbes-th slot is unused
- // (its name is the undefined value), the hash table is guaranteed not to
- // contain the property. This holds even if some slots represent deleted
- // properties (their names are the hole value).
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- if (i > 0) {
- __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
- }
- __ and_(scratch, Operand(esp, 0));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
-
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ mov(scratch, Operand(dictionary_,
- index_,
- times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ cmp(scratch, masm->isolate()->factory()->undefined_value());
- __ j(equal, &not_in_dictionary);
-
- // Stop if found the property.
- __ cmp(scratch, Operand(esp, 3 * kPointerSize));
- __ j(equal, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // If we hit a key that is not an internalized string during negative
- // lookup we have to bail out, as this key might be equal to the
- // key we are looking for.
-
- // Check if the entry name is not an internalized string.
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
- kIsInternalizedMask);
- __ j(zero, &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
- __ mov(result_, Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
- }
-
- __ bind(&in_dictionary);
- __ mov(result_, Immediate(1));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_in_dictionary);
- __ mov(result_, Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-}
-
-
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
- { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal and CallFunctionStub.
- { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField and
- // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
- // GenerateStoreField calls the stub with two different permutations of
- // registers. This is the second.
- { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
- // StoreIC::GenerateNormal via GenerateDictionaryStore
- { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
- // KeyedStoreIC::GenerateGeneric.
- { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
- { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
- { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
- { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
- // StoreArrayLiteralElementStub::Generate
- { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub
- { REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated() {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
-
- CpuFeatures::TryForceFeatureScope scope(SSE2);
- if (CpuFeatures::IsSupported(SSE2)) {
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return CpuFeatures::IsSupported(SSE2);
-}
-
-
-// Takes the input in 3 registers: address_, value_ and object_. A pointer
-// to the value has just been written into the object; now this stub makes
-// sure we keep the GC informed. The word in the object where the value has
-// been written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so that the
- // offsets get fixed up correctly by the bind(Label*) call. We patch them
- // back and forth between compare instructions (effectively nops in this
- // position) and the real branches when we start and stop incremental heap
- // marking.
- __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
- __ jmp(&skip_to_incremental_compacting, Label::kFar);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- masm->set_byte_at(0, kTwoByteNopInstruction);
- masm->set_byte_at(2, kFiveByteNopInstruction);
-}
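
The patching mentioned above works by rewriting the first byte of each of the two jump sites in place. A sketch of the idea; the concrete opcode values (0xEB/0xE9 for the short and near jumps, cmp opcodes 0x3C/0x3D serving as harmless 2- and 5-byte "nops") are assumptions based on standard ia32 encodings and the kTwoByteNopInstruction/kFiveByteNopInstruction names, not taken from this file.

```cpp
#include <cstdint>

enum class Mode { kStoreBufferOnly, kIncremental, kIncrementalCompaction };

// stub points at the first generated instruction of RecordWriteStub.
void PatchRecordWriteMode(uint8_t* stub, Mode mode) {
  // Byte 0: short jmp (0xEB) to the non-compacting path, or a 2-byte "nop".
  stub[0] = (mode == Mode::kIncremental) ? 0xEB : 0x3C;
  // Byte 2: near jmp (0xE9) to the compacting path, or a 5-byte "nop".
  stub[2] = (mode == Mode::kIncrementalCompaction) ? 0xE9 : 0x3D;
  // In kStoreBufferOnly mode both sites are nops, so the stub falls
  // through to the remembered-set update and returns.
}
```
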
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- not_zero,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
- mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm,
- kReturnOnNoNeedToInformIncrementalMarker,
- mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ ret(0);
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
- int argument_count = 3;
- __ PrepareCallCFunction(argument_count, regs_.scratch0());
- __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
- __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
-
- AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label object_is_black, need_incremental, need_incremental_pop_object;
-
- __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ and_(regs_.scratch0(), regs_.object());
- __ mov(regs_.scratch1(),
- Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ sub(regs_.scratch1(), Immediate(1));
- __ mov(Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset),
- regs_.scratch1());
- __ j(negative, &need_incremental);
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(),
- regs_.scratch0(),
- regs_.scratch1(),
- &object_is_black,
- Label::kNear);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&object_is_black);
-
- // Get the value from the slot.
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- zero,
- &ensure_not_white,
- Label::kNear);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- not_zero,
- &ensure_not_white,
- Label::kNear);
-
- __ jmp(&need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need an extra register for this, so we push the object register
- // temporarily.
- __ push(regs_.object());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object,
- Label::kNear);
- __ pop(regs_.object());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : element value to store
- // -- ebx : array literal
- // -- edi : map of array literal
- // -- ecx : element index as smi
- // -- edx : array literal index in function
- // -- esp[0] : return address
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label slow_elements_from_double;
- Label fast_elements;
-
- __ CheckFastElements(edi, &double_elements);
-
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
- __ JumpIfSmi(eax, &smi_element);
- __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
-
- // Storing into the array literal requires an elements transition. Call
- // into the runtime.
-
- __ bind(&slow_elements);
- __ pop(edi); // Pop the return address; remember to put it back later for
- // the tail call.
- __ push(ebx);
- __ push(ecx);
- __ push(eax);
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(edx);
- __ push(edi); // Restore the return address so that the tail call returns
- // to the right place.
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- __ bind(&slow_elements_from_double);
- __ pop(edx);
- __ jmp(&slow_elements);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
- FixedArrayBase::kHeaderSize));
- __ mov(Operand(ecx, 0), eax);
- // Update the write barrier for the array store.
- __ RecordWrite(ebx, ecx, eax,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
- FixedArrayBase::kHeaderSize), eax);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
-
- __ push(edx);
- __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(eax,
- edx,
- ecx,
- edi,
- xmm0,
- &slow_elements_from_double,
- false);
- __ pop(edx);
- __ ret(0);
-}
-
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- ASSERT(!Serializer::enabled());
- bool save_fp_regs = CpuFeatures::IsSupported(SSE2);
- CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
- __ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ mov(ebx, MemOperand(ebp, parameter_count_offset));
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ pop(ecx);
- __ lea(esp, MemOperand(esp, ebx, times_pointer_size,
- extra_expression_stack_count_ * kPointerSize));
- __ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
- ProfileEntryHookStub stub;
- masm->CallStub(&stub);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // Ecx is the only volatile register we must save.
- __ push(ecx);
-
- // Calculate and push the original stack pointer.
- __ lea(eax, Operand(esp, kPointerSize));
- __ push(eax);
-
- // Calculate and push the function address.
- __ mov(eax, Operand(eax, 0));
- __ sub(eax, Immediate(Assembler::kCallInstructionLength));
- __ push(eax);
-
- // Call the entry hook.
- int32_t hook_location = reinterpret_cast<int32_t>(&entry_hook_);
- __ call(Operand(hook_location, RelocInfo::NONE32));
- __ add(esp, Immediate(2 * kPointerSize));
-
- // Restore ecx.
- __ pop(ecx);
- __ ret(0);
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/code-stubs-ia32.h b/src/3rdparty/v8/src/ia32/code-stubs-ia32.h
deleted file mode 100644
index e6bb38a..0000000
--- a/src/3rdparty/v8/src/ia32/code-stubs-ia32.h
+++ /dev/null
@@ -1,646 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_CODE_STUBS_IA32_H_
-#define V8_IA32_CODE_STUBS_IA32_H_
-
-#include "macro-assembler.h"
-#include "code-stubs.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) {}
- void Generate(MacroAssembler* masm);
- static void GenerateOperation(MacroAssembler* masm,
- TranscendentalCache::Type type);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual bool IsPregenerated() { return true; }
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
-class UnaryOpStub: public PlatformCodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use
- // virtual functions instead of switches more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* undo,
- Label* slow,
- Label::Distance non_smi_near = Label::kFar,
- Label::Distance undo_near = Label::kFar,
- Label::Distance slow_near = Label::kFar);
- void GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near = Label::kFar);
- void GenerateSmiCodeUndo(MacroAssembler* masm);
-
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateNumberStubSub(MacroAssembler* masm);
- void GenerateNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
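
The BitField declarations above fully determine the key layout: MinorKey() packs the mode into bit 0, the token op into bits 1-7, and the operand type info into bits 8-10. Equivalently, written out with explicit shifts and masks:

```cpp
// Equivalent of UnaryOpStub::MinorKey() above, with explicit masks.
constexpr int EncodeUnaryOpMinorKey(int mode, int op, int type_info) {
  return (mode & 0x1)              // ModeBits:            1 bit  at bit 0
       | ((op & 0x7F) << 1)        // OpBits:              7 bits at bit 1
       | ((type_info & 0x7) << 8); // OperandTypeInfoBits: 3 bits at bit 8
}
```
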
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies ecx characters from esi to edi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be edi.
- Register src, // Must be esi.
- Register count, // Must be ecx.
- Register scratch, // Neither of above.
- bool ascii);
-
- // Probe the string table for a two character string. If the string
- // requires non-standard hashing a jump to the label not_probed is
- // performed and registers c1 and c2 are preserved. In all other
- // cases they are clobbered. If the string is not found by probing a
- // jump to the label not_found is performed. This jump does not
- // guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in
- // register eax.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_probed,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
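The flags form a small bitmask, so a stub can omit either operand check
independently. A minimal illustrative sketch of combining them (not part of
the deleted header; StringAddStub is declared just below):

    StringAddFlags flags = static_cast<StringAddFlags>(
        NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB);
    ASSERT(flags == NO_STRING_CHECK_IN_STUB);  // Both checks omitted.
    StringAddStub stub(flags);                 // MinorKey() == flags.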
-
-
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow);
-
- const StringAddFlags flags_;
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- StringCompareStub() { }
-
- // Compares two flat ASCII strings and returns result in eax.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- // Compares two flat ASCII strings for equality and returns result
- // in eax.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2);
-
- private:
- virtual Major MajorKey() { return StringCompare; }
- virtual int MinorKey() { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
- Label::Distance chars_not_equal_near = Label::kFar);
-};
-
-
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache, the generated code falls
- // through with the result in the result register. The object and the result
- // register can be the same. If the number is not found in the cache, the
- // code jumps to the label not_found; only the content of the object register
- // is guaranteed to be unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- StringDictionaryLookupStub(Register dictionary,
- Register result,
- Register index,
- LookupMode mode)
- : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<String> name,
- Register r0);
-
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- Major MajorKey() { return StringDictionaryLookup; }
-
- int MinorKey() {
- return DictionaryBits::encode(dictionary_.code()) |
- ResultBits::encode(result_.code()) |
- IndexBits::encode(index_.code()) |
- LookupModeBits::encode(mode_);
- }
-
- class DictionaryBits: public BitField<int, 0, 3> {};
- class ResultBits: public BitField<int, 3, 3> {};
- class IndexBits: public BitField<int, 6, 3> {};
- class LookupModeBits: public BitField<LookupMode, 9, 1> {};
-
- Register dictionary_;
- Register result_;
- Register index_;
- LookupMode mode_;
-};
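MinorKey() above packs the three register codes (3 bits each, since ia32 has
eight general registers) and the one-bit lookup mode into a single integer
via BitField. A sketch of the layout, assuming V8's BitField helper and the
usual ia32 register codes (eax = 0, ecx = 1, edx = 2, ebx = 3):

    // Bits: [0..2] dictionary  [3..5] result  [6..8] index  [9] mode.
    int key = DictionaryBits::encode(2)                   // edx
            | ResultBits::encode(0)                       // eax
            | IndexBits::encode(3)                        // ebx
            | LookupModeBits::encode(POSITIVE_LOOKUP);
    // Decoding uses the same classes: DictionaryBits::decode(key) == 2.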
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- }
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
- static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
-
- static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
- static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
-
- static Mode GetMode(Code* stub) {
- byte first_instruction = stub->instruction_start()[0];
- byte second_instruction = stub->instruction_start()[2];
-
- if (first_instruction == kTwoByteJumpInstruction) {
- return INCREMENTAL;
- }
-
- ASSERT(first_instruction == kTwoByteNopInstruction);
-
- if (second_instruction == kFiveByteJumpInstruction) {
- return INCREMENTAL_COMPACTION;
- }
-
- ASSERT(second_instruction == kFiveByteNopInstruction);
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- switch (mode) {
- case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteNopInstruction;
- break;
- case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteJumpInstruction;
- break;
- case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteJumpInstruction;
- break;
- }
- ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 7);
- }
-
- private:
- // This is a helper class for freeing up 3 scratch registers, where the third
- // is always ecx (needed for shift operations). The input is two registers
- // that must be preserved and one scratch register provided by the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
- : object_orig_(object),
- address_orig_(address),
- scratch0_orig_(scratch0),
- object_(object),
- address_(address),
- scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
- if (scratch0.is(ecx)) {
- scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
- }
- if (object.is(ecx)) {
- object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
- }
- if (address.is(ecx)) {
- address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
- }
- ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
- }
-
- void Save(MacroAssembler* masm) {
- ASSERT(!address_orig_.is(object_));
- ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
- ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
- ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
- // We don't have to save scratch0_orig_ because it was given to us as
- // a scratch register. But if we had to switch to a different reg then
- // we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
- if (!ecx.is(scratch0_orig_) &&
- !ecx.is(object_orig_) &&
- !ecx.is(address_orig_)) {
- masm->push(ecx);
- }
- masm->push(scratch1_);
- if (!address_.is(address_orig_)) {
- masm->push(address_);
- masm->mov(address_, address_orig_);
- }
- if (!object_.is(object_orig_)) {
- masm->push(object_);
- masm->mov(object_, object_orig_);
- }
- }
-
- void Restore(MacroAssembler* masm) {
- // These will have been preserved the entire time, so we just need to move
- // them back. Only in one case is the orig_ reg different from the plain
- // one, since only one of them can alias with ecx.
- if (!object_.is(object_orig_)) {
- masm->mov(object_orig_, object_);
- masm->pop(object_);
- }
- if (!address_.is(address_orig_)) {
- masm->mov(address_orig_, address_);
- masm->pop(address_);
- }
- masm->pop(scratch1_);
- if (!ecx.is(scratch0_orig_) &&
- !ecx.is(object_orig_) &&
- !ecx.is(address_orig_)) {
- masm->pop(ecx);
- }
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The caller-saved
- // registers are eax, ecx and edx. The three scratch registers (incl. ecx)
- // will be restored by other means, so we don't bother pushing them here.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
- if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- masm->sub(esp,
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
- // Save all XMM registers except XMM0.
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
- XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
- }
- }
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
- SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- // Restore all XMM registers except XMM0.
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
- XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
- }
- masm->add(esp,
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
- }
- if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
- if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_orig_;
- Register address_orig_;
- Register scratch0_orig_;
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
- // Third scratch register is always ecx.
-
- Register GetRegThatIsNotEcxOr(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(ecx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
- }
-
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- class ObjectBits: public BitField<int, 0, 3> {};
- class ValueBits: public BitField<int, 3, 3> {};
- class AddressBits: public BitField<int, 6, 3> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
-
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
- RegisterAllocation regs_;
-};
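The GetMode()/Patch() pair above works because the stub is generated with a
fixed 7-byte prologue whose leading instructions are either harmless compares
(nops in effect) or short jumps. A sketch of the three states, reading the
opcode bytes the way GetMode() does (offsets 0 and 2):

    // STORE_BUFFER_ONLY:       3c XX 3d XX XX XX XX  cmp al,#imm8; cmp eax,#imm32
    // INCREMENTAL:             eb XX ..              jmp #imm8 (skips the rest)
    // INCREMENTAL_COMPACTION:  3c XX e9 XX XX XX XX  cmp al,#imm8; jmp #imm32

Patch() rewrites only the opcode bytes at offsets 0 and 2 and then flushes
all 7 bytes from the instruction cache.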
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/codegen-ia32.cc b/src/3rdparty/v8/src/ia32/codegen-ia32.cc
deleted file mode 100644
index 5368811..0000000
--- a/src/3rdparty/v8/src/ia32/codegen-ia32.cc
+++ /dev/null
@@ -1,967 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen.h"
-#include "heap.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
- masm->set_has_frame(true);
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
- masm->set_has_frame(false);
-}
-
-
-#define __ masm.
-
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) {
- // Fall back to the library function if the function cannot be created.
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- }
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // esp[1 * kPointerSize]: raw double input
- // esp[0 * kPointerSize]: return address
- // Move double input into registers.
-
- __ push(ebx);
- __ push(edx);
- __ push(edi);
- __ fld_d(Operand(esp, 4 * kPointerSize));
- __ mov(ebx, Operand(esp, 4 * kPointerSize));
- __ mov(edx, Operand(esp, 5 * kPointerSize));
- TranscendentalCacheStub::GenerateOperation(&masm, type);
- // The return value is expected to be on ST(0) of the FPU stack.
- __ pop(edi);
- __ pop(edx);
- __ pop(ebx);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
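A hedged usage sketch of the returned pointer, assuming the usual
typedef double (*UnaryMathFunction)(double) from codegen.h:

    UnaryMathFunction fast_sin =
        CreateTranscendentalFunction(TranscendentalCache::SIN);
    double y = fast_sin(0.5);  // Either the generated stub or libc's sin.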
-
-
-UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(SSE2)) return &exp;
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // esp[1 * kPointerSize]: raw double input
- // esp[0 * kPointerSize]: return address
- {
- CpuFeatures::Scope use_sse2(SSE2);
- XMMRegister input = xmm1;
- XMMRegister result = xmm2;
- __ movdbl(input, Operand(esp, 1 * kPointerSize));
- __ push(eax);
- __ push(ebx);
-
- MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);
-
- __ pop(ebx);
- __ pop(eax);
- __ movdbl(Operand(esp, 1 * kPointerSize), result);
- __ fld_d(Operand(esp, 1 * kPointerSize));
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-UnaryMathFunction CreateSqrtFunction() {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- // If SSE2 is not available, we can use libc's implementation to ensure
- // consistency, since code generated by fullcodegen calls into the runtime
- // in that case.
- if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // esp[1 * kPointerSize]: raw double input
- // esp[0 * kPointerSize]: return address
- // Move double input into registers.
- {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
- __ sqrtsd(xmm0, xmm0);
- __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
- // Load result into floating point register as return value.
- __ fld_d(Operand(esp, 1 * kPointerSize));
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-static void MemCopyWrapper(void* dest, const void* src, size_t size) {
- memcpy(dest, src, size);
-}
-
-
-OS::MemCopyFunction CreateMemCopyFunction() {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) return &MemCopyWrapper;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-
- // Generated code is put into a fixed, unmovable buffer, and not into
- // the V8 heap. We can't, and don't, refer to any relocatable addresses
- // (e.g. the JavaScript nan-object).
-
- // 32-bit C declaration function calls pass arguments on stack.
-
- // Stack layout:
- // esp[12]: Third argument, size.
- // esp[8]: Second argument, source pointer.
- // esp[4]: First argument, destination pointer.
- // esp[0]: return address
-
- const int kDestinationOffset = 1 * kPointerSize;
- const int kSourceOffset = 2 * kPointerSize;
- const int kSizeOffset = 3 * kPointerSize;
-
- int stack_offset = 0; // Update if we change the stack height.
-
- if (FLAG_debug_code) {
- __ cmp(Operand(esp, kSizeOffset + stack_offset),
- Immediate(OS::kMinComplexMemCopy));
- Label ok;
- __ j(greater_equal, &ok);
- __ int3();
- __ bind(&ok);
- }
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope enable(SSE2);
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
-
- __ movdqu(xmm0, Operand(src, 0));
- __ movdqu(Operand(dst, 0), xmm0);
- __ mov(edx, dst);
- __ and_(edx, 0xF);
- __ neg(edx);
- __ add(edx, Immediate(16));
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
-
- // edi is now aligned. Check if esi is also aligned.
- Label unaligned_source;
- __ test(src, Immediate(0x0F));
- __ j(not_zero, &unaligned_source);
- {
- // Copy loop for aligned source and destination.
- __ mov(edx, count);
- Register loop_count = ecx;
- Register count = edx;
- __ shr(loop_count, 5);
- {
- // Main copy loop.
- Label loop;
- __ bind(&loop);
- __ prefetch(Operand(src, 0x20), 1);
- __ movdqa(xmm0, Operand(src, 0x00));
- __ movdqa(xmm1, Operand(src, 0x10));
- __ add(src, Immediate(0x20));
-
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ add(dst, Immediate(0x20));
-
- __ dec(loop_count);
- __ j(not_zero, &loop);
- }
-
- // At most 31 bytes to copy.
- Label move_less_16;
- __ test(count, Immediate(0x10));
- __ j(zero, &move_less_16);
- __ movdqa(xmm0, Operand(src, 0));
- __ add(src, Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- __ add(dst, Immediate(0x10));
- __ bind(&move_less_16);
-
- // At most 15 bytes to copy. Copy 16 bytes at end of string.
- __ and_(count, 0xF);
- __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
-
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
- }
- __ Align(16);
- {
- // Copy loop for unaligned source and aligned destination.
- // If source is not aligned, we can't read it as efficiently.
- __ bind(&unaligned_source);
- __ mov(edx, ecx);
- Register loop_count = ecx;
- Register count = edx;
- __ shr(loop_count, 5);
- {
- // Main copy loop
- Label loop;
- __ bind(&loop);
- __ prefetch(Operand(src, 0x20), 1);
- __ movdqu(xmm0, Operand(src, 0x00));
- __ movdqu(xmm1, Operand(src, 0x10));
- __ add(src, Immediate(0x20));
-
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ add(dst, Immediate(0x20));
-
- __ dec(loop_count);
- __ j(not_zero, &loop);
- }
-
- // At most 31 bytes to copy.
- Label move_less_16;
- __ test(count, Immediate(0x10));
- __ j(zero, &move_less_16);
- __ movdqu(xmm0, Operand(src, 0));
- __ add(src, Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- __ add(dst, Immediate(0x10));
- __ bind(&move_less_16);
-
- // At most 15 bytes to copy. Copy 16 bytes at end of string.
- __ and_(count, 0x0F);
- __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
-
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
- }
-
- } else {
- // SSE2 not supported. Unlikely to happen in practice.
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- __ cld();
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
- // Copy the first word.
- __ mov(eax, Operand(src, 0));
- __ mov(Operand(dst, 0), eax);
-
- // Increment src and dst so that dst is aligned.
- __ mov(edx, dst);
- __ and_(edx, 0x03);
- __ neg(edx);
- __ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
- // edi is now aligned; ecx holds the number of remaining bytes to copy.
-
- __ mov(edx, count);
- count = edx;
- __ shr(ecx, 2); // Make word count instead of byte count.
- __ rep_movs();
-
- // At most 3 bytes left to copy. Copy 4 bytes at end of string.
- __ and_(count, 3);
- __ mov(eax, Operand(src, count, times_1, -4));
- __ mov(Operand(dst, count, times_1, -4), eax);
-
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
-}
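The SSE2 path above uses a standard alignment trick: copy the first 16 bytes
with an unaligned load/store, then round dst up to a 16-byte boundary and
advance src and count by the same amount so the main loop can use aligned
stores. The same header arithmetic in plain C++ (a sketch, not part of the
deleted file; dst, src and count stand for the function arguments):

    char* d = static_cast<char*>(dst);
    const char* s = static_cast<const char*>(src);
    size_t head = 16 - (reinterpret_cast<uintptr_t>(d) & 0xF);  // 1..16
    memcpy(d, s, 16);   // Unaligned 16-byte head copy (the movdqu above).
    d += head; s += head; count -= head;
    // d is now 16-byte aligned; the tail is copied the same way, with a
    // final unaligned 16-byte store ending at d + count.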
-
-#undef __
-
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_site_info_found) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ebx : target map
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_site_info_found != NULL);
- __ TestJSArrayForAllocationSiteInfo(edx, edi);
- __ j(equal, allocation_site_info_found);
- }
-
- // Set transitioned map.
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ebx : target map
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label loop, entry, convert_hole, gc_required, only_change_map;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(edx, edi);
- __ j(equal, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(equal, &only_change_map);
-
- __ push(eax);
- __ push(ebx);
-
- __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Allocate new FixedDoubleArray.
- // edx: receiver
- // edi: length of source FixedArray (smi-tagged)
- AllocationFlags flags =
- static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
- __ AllocateInNewSpace(FixedDoubleArray::kHeaderSize, times_8,
- edi, REGISTER_VALUE_IS_SMI,
- eax, ebx, no_reg, &gc_required, flags);
-
- // eax: destination FixedDoubleArray
- // edi: number of elements
- // edx: receiver
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_double_array_map()));
- __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
- __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
- __ mov(ebx, eax);
- __ RecordWriteField(edx,
- JSObject::kElementsOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
-
- // Prepare for conversion loop.
- ExternalReference canonical_the_hole_nan_reference =
- ExternalReference::address_of_the_hole_nan();
- XMMRegister the_hole_nan = xmm1;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(the_hole_nan,
- Operand::StaticVariable(canonical_the_hole_nan_reference));
- }
- __ jmp(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- // Restore registers before jumping into runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ pop(ebx);
- __ pop(eax);
- __ jmp(fail);
-
- // Convert and copy elements
- // esi: source FixedArray
- __ bind(&loop);
- __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
- // ebx: current element from source
- // edi: index of current element
- __ JumpIfNotSmi(ebx, &convert_hole);
-
- // Normal smi, convert it to double and store.
- __ SmiUntag(ebx);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
- xmm0);
- } else {
- __ push(ebx);
- __ fild_s(Operand(esp, 0));
- __ pop(ebx);
- __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
- }
- __ jmp(&entry);
-
- // Found hole, store hole_nan_as_double instead.
- __ bind(&convert_hole);
-
- if (FLAG_debug_code) {
- __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
- __ Assert(equal, "object found in smi-only array");
- }
-
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
- the_hole_nan);
- } else {
- __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
- __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
- }
-
- __ bind(&entry);
- __ sub(edi, Immediate(Smi::FromInt(1)));
- __ j(not_sign, &loop);
-
- __ pop(ebx);
- __ pop(eax);
-
- // Restore esi.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- __ bind(&only_change_map);
- // eax: value
- // ebx: target map
- // Set transitioned map.
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
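On ia32 a smi is the integer value shifted left by one (tag bit 0), so each
element is untagged before the int-to-double conversion. What SmiUntag plus
cvtsi2sd compute for one element, in C++ terms (a sketch):

    int32_t tagged = raw_element;                  // smi: value << 1
    int32_t value = tagged >> 1;                   // SmiUntag
    double boxed = static_cast<double>(value);     // cvtsi2sd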
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ebx : target map
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label loop, entry, convert_hole, gc_required, only_change_map, success;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(edx, edi);
- __ j(equal, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(equal, &only_change_map);
-
- __ push(eax);
- __ push(edx);
- __ push(ebx);
-
- __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
-
- // Allocate new FixedArray.
- // ebx: length of source FixedDoubleArray (smi-tagged)
- __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
- __ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
-
- // eax: destination FixedArray
- // ebx: number of elements
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-
- __ jmp(&entry);
-
- // ebx: target map
- // edx: receiver
- // Set transitioned map.
- __ bind(&only_change_map);
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&success);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ pop(ebx);
- __ pop(edx);
- __ pop(eax);
- __ jmp(fail);
-
- // Box doubles into heap numbers.
- // edi: source FixedDoubleArray
- // eax: destination FixedArray
- __ bind(&loop);
- // ebx: index of current element (smi-tagged)
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
- __ j(equal, &convert_hole);
-
- // Non-hole double, copy value into a heap number.
- __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
- // edx: new heap number
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ movdbl(xmm0,
- FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
- } else {
- __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
- __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
- __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
- }
- __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
- __ mov(esi, ebx);
- __ RecordWriteArray(eax,
- edx,
- esi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&entry, Label::kNear);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
- masm->isolate()->factory()->the_hole_value());
-
- __ bind(&entry);
- __ sub(ebx, Immediate(Smi::FromInt(1)));
- __ j(not_sign, &loop);
-
- __ pop(ebx);
- __ pop(edx);
- // ebx: target map
- // edx: receiver
- // Set transitioned map.
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
- __ RecordWriteField(edx,
- JSObject::kElementsOffset,
- eax,
- edi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Restore registers.
- __ pop(eax);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- __ bind(&success);
-}
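A hole in a FixedDoubleArray is a NaN with a reserved bit pattern, which is
why the loop above only compares the upper 32 bits of each element against
kHoleNanUpper32. The equivalent check in C++ (a sketch; element_address is a
hypothetical pointer to the 8-byte element):

    uint32_t upper = *reinterpret_cast<const uint32_t*>(
        element_address + sizeof(kHoleNanLower32));
    bool is_hole = (upper == kHoleNanUpper32);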
-
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Factory* factory,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- // Fetch the instance type of the receiver into result register.
- __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ test(result, Immediate(kIsIndirectStringMask));
- __ j(zero, &check_sequential, Label::kNear);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ test(result, Immediate(kSlicedNotConsMask));
- __ j(zero, &cons_string, Label::kNear);
-
- // Handle slices.
- Label indirect_string_loaded;
- __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ SmiUntag(result);
- __ add(index, result);
- __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded, Label::kNear);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ cmp(FieldOperand(string, ConsString::kSecondOffset),
- Immediate(factory->empty_string()));
- __ j(not_equal, call_runtime);
- __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label seq_string;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ test(result, Immediate(kStringRepresentationMask));
- __ j(zero, &seq_string, Label::kNear);
-
- // Handle external strings.
- Label ascii_external, done;
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ test(result, Immediate(kIsIndirectStringMask));
- __ Assert(zero, "external string expected, but not found");
- }
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ test_b(result, kShortExternalStringMask);
- __ j(not_zero, call_runtime);
- // Check encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ test_b(result, kStringEncodingMask);
- __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
- __ j(not_equal, &ascii_external, Label::kNear);
- // Two-byte string.
- __ movzx_w(result, Operand(result, index, times_2, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&ascii_external);
- // Ascii string.
- __ movzx_b(result, Operand(result, index, times_1, 0));
- __ jmp(&done, Label::kNear);
-
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii;
- __ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ test(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii, Label::kNear);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- __ movzx_w(result, FieldOperand(string,
- index,
- times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- // Ascii string.
- // Load the byte into the result register.
- __ bind(&ascii);
- __ movzx_b(result, FieldOperand(string,
- index,
- times_1,
- SeqOneByteString::kHeaderSize));
- __ bind(&done);
-}
-
-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ test(index, Immediate(kSmiTagMask));
- __ Check(zero, "Non-smi index");
- __ test(value, Immediate(kSmiTagMask));
- __ Check(zero, "Non-smi value");
-
- __ cmp(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, "Index is too large");
-
- __ cmp(index, Immediate(Smi::FromInt(0)));
- __ Check(greater_equal, "Index is negative");
-
- __ push(value);
- __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, "Unexpected string type");
- __ pop(value);
- }
-
- __ SmiUntag(value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ SmiUntag(index);
- __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
- } else {
- // No need to untag a smi for two-byte addressing.
- __ mov_w(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
- }
-}
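Note the addressing above: the one-byte store untags the index first, while
the two-byte store uses the still-tagged smi with times_1, because a smi is
the value shifted left by one, which is exactly the byte offset of a two-byte
character. A sketch of the equivalence:

    int32_t tagged = value << 1;   // smi encoding on ia32
    // two-byte: byte offset = value * 2 == tagged, so no untag needed;
    // one-byte: byte offset = value * 1 == tagged >> 1, hence SmiUntag.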
-
-
-static Operand ExpConstant(int index) {
- return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2) {
- ASSERT(!input.is(double_scratch));
- ASSERT(!input.is(result));
- ASSERT(!result.is(double_scratch));
- ASSERT(!temp1.is(temp2));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
-
- __ movdbl(double_scratch, ExpConstant(0));
- __ xorpd(result, result);
- __ ucomisd(double_scratch, input);
- __ j(above_equal, &done);
- __ ucomisd(input, ExpConstant(1));
- __ movdbl(result, ExpConstant(2));
- __ j(above_equal, &done);
- __ movdbl(double_scratch, ExpConstant(3));
- __ movdbl(result, ExpConstant(4));
- __ mulsd(double_scratch, input);
- __ addsd(double_scratch, result);
- __ movd(temp2, double_scratch);
- __ subsd(double_scratch, result);
- __ movdbl(result, ExpConstant(6));
- __ mulsd(double_scratch, ExpConstant(5));
- __ subsd(double_scratch, input);
- __ subsd(result, double_scratch);
- __ movsd(input, double_scratch);
- __ mulsd(input, double_scratch);
- __ mulsd(result, input);
- __ mov(temp1, temp2);
- __ mulsd(result, ExpConstant(7));
- __ subsd(result, double_scratch);
- __ add(temp1, Immediate(0x1ff800));
- __ addsd(result, ExpConstant(8));
- __ and_(temp2, Immediate(0x7ff));
- __ shr(temp1, 11);
- __ shl(temp1, 20);
- __ movd(input, temp1);
- __ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
- __ movdbl(double_scratch, Operand::StaticArray(
- temp2, times_8, ExternalReference::math_exp_log_table()));
- __ por(input, double_scratch);
- __ mulsd(result, input);
- __ bind(&done);
-}
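A reading of the generator above (inferred from the code, not from the
constant table itself): it uses the standard table-driven decomposition

    x = (m / 2^11) * ln(2) + r,  with m an integer and r small, so that
    e^x = 2^(m >> 11) * 2^((m & 0x7ff) / 2^11) * e^r.

The shr/shl pair builds the IEEE-754 exponent bits from m >> 11, the
math_exp_log_table lookup supplies the 2^((m & 0x7ff) / 2^11) mantissa that
por merges in, and the remaining mulsd/addsd chain evaluates a short
polynomial approximation of e^r.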
-
-#undef __
-
-static const int kNoCodeAgeSequenceLength = 5;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- static bool initialized = false;
- static byte sequence[kNoCodeAgeSequenceLength];
- *length = kNoCodeAgeSequenceLength;
- if (!initialized) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found both in
- // FUNCTION and OPTIMIZED_FUNCTION code:
- CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->push(ebp);
- patcher.masm()->mov(ebp, esp);
- patcher.masm()->push(esi);
- patcher.masm()->push(edi);
- initialized = true;
- }
- return sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = (!memcmp(sequence, young_sequence, young_length));
- ASSERT(result || *sequence == kCallOpcode);
- return result;
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- sequence++; // Skip the kCallOpcode byte
- Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
- Assembler::kCallTargetAddressOffset;
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length);
- patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/codegen-ia32.h b/src/3rdparty/v8/src/ia32/codegen-ia32.h
deleted file mode 100644
index 5137274..0000000
--- a/src/3rdparty/v8/src/ia32/codegen-ia32.h
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_CODEGEN_IA32_H_
-#define V8_IA32_CODEGEN_IA32_H_
-
-#include "ast.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations
-class CompilationInfo;
-
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator {
- public:
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Expression* type);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
-
- static Operand FixedArrayElementOperand(Register array,
- Register index_as_smi,
- int additional_offset = 0) {
- int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
- return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
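FixedArrayElementOperand relies on the same smi representation: the index is
tagged (value << 1), so scaling it by times_half_pointer_size (2 on ia32)
yields value * kPointerSize without untagging. In plain arithmetic (sketch):

    // tagged = value << 1; kPointerSize == 4 on ia32.
    int offset = (value << 1) * (kPointerSize / 2);  // == value * kPointerSize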
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Factory* factory,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/cpu-ia32.cc b/src/3rdparty/v8/src/ia32/cpu-ia32.cc
deleted file mode 100644
index 9eabb2a..0000000
--- a/src/3rdparty/v8/src/ia32/cpu-ia32.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// CPU specific code for ia32 independent of OS goes here.
-
-#ifdef __GNUC__
-#include "third_party/valgrind/valgrind.h"
-#endif
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "cpu.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return CpuFeatures::IsSupported(SSE2);
-}
-
-
-void CPU::FlushICache(void* start, size_t size) {
- // No need to flush the instruction cache on Intel. On Intel, instruction
- // cache flushing is only necessary when multiple cores are running the
- // same code simultaneously. V8 (and JavaScript) is single threaded, and
- // when code is patched on an Intel CPU the core performing the patching
- // will have its own instruction cache updated automatically.
-
- // If flushing of the instruction cache becomes necessary, Windows has the
- // API function FlushInstructionCache.
-
- // By default, valgrind only checks the stack for writes that might need to
- // invalidate already cached translated code. This leads to random
- // instability when code patches or moves go unnoticed. One solution is to
- // run valgrind with --smc-check=all, but this comes at a big performance
- // cost, so instead we notify valgrind to invalidate its cache here.
-#ifdef VALGRIND_DISCARD_TRANSLATIONS
- unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
- USE(res);
-#endif
-}
-
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid depending on the Visual Studio runtime, the following code
- // could be used instead:
- // __asm { int 3 }
- __debugbreak();
-#else
- asm("int $3");
-#endif
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/debug-ia32.cc b/src/3rdparty/v8/src/ia32/debug-ia32.cc
deleted file mode 100644
index d153e18..0000000
--- a/src/3rdparty/v8/src/ia32/debug-ia32.cc
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen.h"
-#include "debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc
- // for the precise return instruction sequence.
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- ASSERT(Assembler::kJSReturnSequenceLength >=
- Assembler::kCallInstructionLength);
- Isolate* isolate = Isolate::Current();
- rinfo()->PatchCodeWithCall(isolate->debug()->debug_break_return()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
-}
-
-
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceLength);
-}
-
-
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- Isolate* isolate = Isolate::Current();
- rinfo()->PatchCodeWithCall(
- isolate->debug()->debug_break_slot()->entry(),
- Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
-}
-
-// All debug break stubs support padding for LiveEdit.
-const bool Debug::FramePaddingLayout::kIsSupported = true;
-
-
-#define __ ACCESS_MASM(masm)
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs,
- bool convert_call_to_jmp) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
- __ push(Immediate(Smi::FromInt(
- Debug::FramePaddingLayout::kPaddingValue)));
- }
- __ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)));
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non-object values
- // are stored as smis so that they are left untouched by the GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
- }
- if ((non_object_regs & (1 << r)) != 0) {
- if (FLAG_debug_code) {
- __ test(reg, Immediate(0xc0000000));
- __ Assert(zero, "Unable to encode value as smi");
- }
- __ SmiTag(reg);
- __ push(reg);
- }
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ Set(eax, Immediate(0)); // No arguments.
- __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Automatically find a register that can be used after the registers are
- // restored. We need one register for the padding-skip instructions.
- Register unused_reg = { -1 };
-
- // Restore the register values containing object pointers from the
- // expression stack.
- for (int i = kNumJSCallerSaved; --i >= 0;) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Set(reg, Immediate(kDebugZapValue));
- }
- bool taken = reg.code() == esi.code();
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- taken = true;
- }
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(reg);
- __ SmiUntag(reg);
- taken = true;
- }
- if (!taken) {
- unused_reg = reg;
- }
- }
-
- ASSERT(unused_reg.code() != -1);
-
- // Read the current padding counter and skip that number of words.
- __ pop(unused_reg);
- // We divide the stored value by 2 (untagging) and multiply by the word size.
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
-
- // Get rid of the internal frame.
- }
-
- // If this call did not replace a call but patched other code then there will
- // be an unwanted return address left on the stack. Here we get rid of that.
- if (convert_call_to_jmp) {
- __ add(esp, Immediate(kPointerSize));
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller, which was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
- __ jmp(Operand::StaticVariable(after_break_target));
-}
-
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for IC load call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
-}
-
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for IC store call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
-}
-
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC store call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Register state for IC call (from ic-ia32.cc).
- // ----------- S t a t e -------------
- // -- ecx: name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, ecx.bit(), 0, false);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-ia32.cc).
- // ----------- S t a t e -------------
- // -- eax: return value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
-}
-
-
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-ia32.cc).
- // ----------- S t a t e -------------
- // -- edi: function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-ia32.cc).
- // ----------- S t a t e -------------
- // -- ebx: cache cell for call target
- // -- edi: function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-ia32.cc).
- // eax is the actual number of arguments, not encoded as a smi; see the
- // comment above the IC call.
- // ----------- S t a t e -------------
- // -- eax: number of arguments (not smi)
- // -- edi: constructor function
- // -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
-}
-
-
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-ia32.cc).
- // eax is the actual number of arguments, not encoded as a smi; see the
- // comment above the IC call.
- // ----------- S t a t e -------------
- // -- eax: number of arguments (not smi)
- // -- ebx: cache cell for call target
- // -- edi: constructor function
- // -----------------------------------
- // The number of arguments in eax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), eax.bit(), false);
-}
-
-
-void Debug::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nops to make space for a call instruction.
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- __ Nop(Assembler::kDebugBreakSlotLength);
- ASSERT_EQ(Assembler::kDebugBreakSlotLength,
- masm->SizeOfCodeGeneratedSince(&check_codesize));
-}
-
-
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted, no registers can
- // contain object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0, true);
-}
-
-
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
- masm->isolate());
- __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
-
- // We do not know our frame height, but set esp based on ebp.
- __ lea(esp, Operand(ebp, -1 * kPointerSize));
-
- __ pop(edi); // Function.
- __ pop(ebp);
-
- // Load context from the function.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Get function code.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
-
- // Re-run JSFunction, edi is function, esi is context.
- __ jmp(edx);
-}
-
-const bool Debug::kFrameDropperSupported = true;
-
-#undef __
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc b/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
deleted file mode 100644
index e27ea4c..0000000
--- a/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
+++ /dev/null
@@ -1,1184 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-const int Deoptimizer::table_entry_size_ = 10;
-
-
-int Deoptimizer::patch_size() {
- return Assembler::kCallInstructionLength;
-}
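
Both sizes used here fall out of fixed x86 encodings rather than anything file-specific; a quick accounting of the arithmetic:

    // call rel32 : 0xE8 + 4 displacement bytes = 5 bytes
    //   => patch_size() == Assembler::kCallInstructionLength
    // push imm32 : 0x68 + 4 immediate bytes    = 5 bytes
    // jmp rel32  : 0xE9 + 4 displacement bytes = 5 bytes
    //   => table_entry_size_ == 10, one push + one jmp per entry
    //      (see TableEntryGenerator::GeneratePrologue below)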
-
-
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
- Isolate* isolate = code->GetIsolate();
- HandleScope scope(isolate);
-
- // Compute the size of relocation information needed for the code
- // patching in Deoptimizer::DeoptimizeFunction.
- int min_reloc_size = 0;
- int prev_pc_offset = 0;
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- int pc_offset = deopt_data->Pc(i)->value();
- if (pc_offset == -1) continue;
- ASSERT_GE(pc_offset, prev_pc_offset);
- int pc_delta = pc_offset - prev_pc_offset;
- // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
- // if encodable with small pc delta encoding and up to 6 bytes
- // otherwise.
- if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
- min_reloc_size += 2;
- } else {
- min_reloc_size += 6;
- }
- prev_pc_offset = pc_offset;
- }
-
- // If the relocation information is not big enough, we create a new
- // relocation info object that is padded with comments to make it
- // big enough for lazy deoptimization.
- int reloc_length = code->relocation_info()->length();
- if (min_reloc_size > reloc_length) {
- int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
- // Padding needed.
- int min_padding = min_reloc_size - reloc_length;
- // Number of comments needed to take up at least that much space.
- int additional_comments =
- (min_padding + comment_reloc_size - 1) / comment_reloc_size;
- // Actual padding size.
- int padding = additional_comments * comment_reloc_size;
- // Allocate new relocation info and copy old relocation to the end
- // of the new relocation info array because relocation info is
- // written and read backwards.
- Factory* factory = isolate->factory();
- Handle<ByteArray> new_reloc =
- factory->NewByteArray(reloc_length + padding, TENURED);
- memcpy(new_reloc->GetDataStartAddress() + padding,
- code->relocation_info()->GetDataStartAddress(),
- reloc_length);
- // Create a relocation writer to write the comments in the padding
- // space. Use position 0 for everything to ensure short encoding.
- RelocInfoWriter reloc_info_writer(
- new_reloc->GetDataStartAddress() + padding, 0);
- intptr_t comment_string
- = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
- RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
- for (int i = 0; i < additional_comments; ++i) {
-#ifdef DEBUG
- byte* pos_before = reloc_info_writer.pos();
-#endif
- reloc_info_writer.Write(&rinfo);
- ASSERT(RelocInfo::kMinRelocCommentSize ==
- pos_before - reloc_info_writer.pos());
- }
- // Replace relocation information on the code object.
- code->set_relocation_info(*new_reloc);
- }
-}
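
The additional_comments expression above is a ceiling division. A standalone sketch with an illustrative chunk size (the real one is RelocInfo::kMinRelocCommentSize):

    // Smallest k such that k * chunk >= needed.
    static int ChunksNeeded(int needed, int chunk) {
      return (needed + chunk - 1) / chunk;
    }
    // e.g. ChunksNeeded(13, 6) == 3: three comment entries supply
    // 18 bytes of padding, covering the 13 bytes still missing.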
-
-
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction* function) {
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
-
- ASSERT(function->IsOptimized());
- ASSERT(function->FunctionsInFunctionListShareSameCode());
-
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
- // Get the optimized code.
- Code* code = function->code();
- Address code_start_address = code->instruction_start();
-
- // We will overwrite the code's relocation info in-place. Relocation info
- // is written backward. The relocation info is the payload of a byte
- // array. Later on we will slide this to the start of the byte array and
- // create a filler object in the remaining space.
- ByteArray* reloc_info = code->relocation_info();
- Address reloc_end_address = reloc_info->address() + reloc_info->Size();
- RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
-
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
-
- // Since the call is a relative encoding, write new
- // reloc info. We do not need any of the existing reloc info because the
- // existing code will not be used again (we zap it in debug builds).
- //
- // Emit call to lazy deoptimization at all lazy deopt points.
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
-#ifdef DEBUG
- Address prev_call_address = NULL;
-#endif
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
- // Patch lazy deoptimization entry.
- Address call_address = code_start_address + deopt_data->Pc(i)->value();
- CodePatcher patcher(call_address, patch_size());
- Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
- patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
- // We use RUNTIME_ENTRY for deoptimization bailouts.
- RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
- RelocInfo::RUNTIME_ENTRY,
- reinterpret_cast<intptr_t>(deopt_entry),
- NULL);
- reloc_info_writer.Write(&rinfo);
- ASSERT_GE(reloc_info_writer.pos(),
- reloc_info->address() + ByteArray::kHeaderSize);
- ASSERT(prev_call_address == NULL ||
- call_address >= prev_call_address + patch_size());
- ASSERT(call_address + patch_size() <= code->instruction_end());
-#ifdef DEBUG
- prev_call_address = call_address;
-#endif
- }
-
- // Move the relocation info to the beginning of the byte array.
- int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
- memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
-
- // The relocation info is in place, update the size.
- reloc_info->set_length(new_reloc_size);
-
- // Handle the junk part after the new relocation info. We will create
- // a non-live object in the extra space at the end of the former reloc info.
- Address junk_address = reloc_info->address() + reloc_info->Size();
- ASSERT(junk_address <= reloc_end_address);
- isolate->heap()->CreateFillerObjectAt(junk_address,
- reloc_end_address - junk_address);
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
- }
-}
-
-
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x13;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(check_code->entry(),
- Assembler::target_address_at(call_target_address));
- // The back edge bookkeeping code matches the pattern:
- //
- // sub <profiling_counter>, <delta>
- // jns ok
- // call <stack guard>
- // test eax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // sub <profiling_counter>, <delta> ;; Not changed
- // nop
- // nop
- // call <on-stack replacement>
- // test eax, <loop nesting depth>
- // ok:
-
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(replacement_code->entry(),
- Assembler::target_address_at(call_target_address));
-
- // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
- // restore the conditional branch.
- ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- Assembler::set_target_address_at(call_target_address,
- check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, check_code);
-}
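
At the byte level, the two routines above toggle exactly two bytes ahead of the call; a sketch of both states (displacements shown as xx/yy, everything else from the constants above):

    // Unpatched back edge (interrupt check live):
    //   79 13            jns ok              ; kJnsInstruction, kJnsOffset
    //   e8 xx xx xx xx   call <stack guard>
    // Patched for on-stack replacement:
    //   66 90            nop                 ; kNopByteOne, kNopByteTwo
    //   e8 yy yy yy yy   call <on-stack replacement>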
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Next(); // Drop JS frames count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d, ebp:esp=0x%08x:0x%08x]\n",
- ast_id,
- input_frame_size,
- output_frame_size,
- input_->GetRegister(ebp.code()),
- input_->GetRegister(esp.code()));
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- uint32_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // All OSR stack frames are dynamically aligned to an 8-byte boundary.
- int frame_pointer = input_->GetRegister(ebp.code());
- if ((frame_pointer & kPointerSize) != 0) {
- frame_pointer -= kPointerSize;
- has_alignment_padding_ = 1;
- }
-
- int32_t alignment_state = (has_alignment_padding_ == 1) ?
- kAlignmentPaddingPushed :
- kNoAlignmentPadding;
- if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08x ; (alignment state)\n",
- output_offset,
- alignment_state);
- }
- output_[0]->SetFrameSlot(output_offset, alignment_state);
- output_offset -= kPointerSize;
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(ebp.code(), frame_pointer);
- output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- uint32_t pc = reinterpret_cast<uint32_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
- }
-}
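
The alignment probe above reads a single bit of ebp: frames are word-aligned, so on ia32 (kPointerSize == 4) ebp mod 8 is either 0 or 4, and a remainder of 4 means one word of dynamic alignment padding was pushed. A minimal sketch of the same test:

    #include <stdint.h>

    // Mirrors `frame_pointer & kPointerSize` above, with kPointerSize == 4.
    static bool HasAlignmentPadding(uint32_t ebp_value) {
      return (ebp_value & 4) != 0;  // ebp == 4 (mod 8): padding word present.
    }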
-
-
-void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
- int frame_index) {
- //
- //              FROM                                  TO
- //    |          ....           |          |          ....           |
- //    +-------------------------+          +-------------------------+
- //    | JSFunction continuation |          | JSFunction continuation |
- //    +-------------------------+          +-------------------------+
- // |  |    saved frame (ebp)    |          |    saved frame (ebp)    |
- // |  +=========================+<-ebp     +=========================+<-ebp
- // |  |   JSFunction context    |          |   JSFunction context    |
- // v  +-------------------------+          +-------------------------+
- //    |   COMPILED_STUB marker  |          |   STUB_FAILURE marker   |
- //    +-------------------------+          +-------------------------+
- //    |                         |          | caller args.arguments_  |
- //    | ...                     |          +-------------------------+
- //    |                         |          | caller args.length_     |
- //    +-------------------------+<-esp     +-------------------------+
- //                                         |  caller args pointer    |
- //                                         +-------------------------+
- //                                         |  caller stack param 1   |
- //    parameters in registers              +-------------------------+
- //    and spilled to stack                 |          ....           |
- //                                         +-------------------------+
- //                                         |  caller stack param n   |
- //                                         +-------------------------+<-esp
- // eax = number of parameters
- // ebx = failure handler address
- // ebp = saved frame
- // esi = JSFunction context
- //
-
- ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
- int major_key = compiled_code_->major_key();
- CodeStubInterfaceDescriptor* descriptor =
- isolate_->code_stub_interface_descriptor(major_key);
-
- // The output frame must have room for all pushed register parameters
- // and the standard stack frame slots. Include space for an argument
- // object to the callee and optionally the space to pass the argument
- // object to the stub failure handler.
- int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
- sizeof(Arguments) + kPointerSize;
- int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
- int input_frame_size = input_->GetFrameSize();
- int output_frame_size = height_in_bytes + fixed_frame_size;
- if (trace_) {
- PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
- CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
- height_in_bytes);
- }
-
- // The stub failure trampoline is a single frame.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, NULL);
- output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ASSERT(frame_index == 0);
- output_[frame_index] = output_frame;
-
- // The top address for the output frame can be computed from the input
- // frame pointer and the output frame's height. Subtract space for the
- // context and function slots.
- intptr_t top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
- height_in_bytes;
- output_frame->SetTop(top_address);
-
- // Read caller's PC (JSFunction continuation) from the input frame.
- intptr_t input_frame_offset = input_frame_size - kPointerSize;
- intptr_t output_frame_offset = output_frame_size - kPointerSize;
- intptr_t value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Read caller's FP from the input frame, and set this frame's FP.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- intptr_t frame_ptr = input_->GetRegister(ebp.code());
- output_frame->SetRegister(ebp.code(), frame_ptr);
- output_frame->SetFp(frame_ptr);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // The context can be gotten from the input frame.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetRegister(esi.code(), value);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_frame_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (stub fail sentinel)\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- int caller_arg_count = 0;
- if (descriptor->stack_parameter_count_ != NULL) {
- caller_arg_count =
- input_->GetRegister(descriptor->stack_parameter_count_->code());
- }
-
- // Build the Arguments object for the caller's parameters and a pointer to it.
- output_frame_offset -= kPointerSize;
- value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
- (caller_arg_count - 1) * kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.arguments\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = caller_arg_count;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.length\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = frame_ptr - (output_frame_size - output_frame_offset) -
- StandardFrameConstants::kMarkerOffset + kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args*\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Copy the register parameters to the failure frame.
- for (int i = 0; i < descriptor->register_param_count_; ++i) {
- output_frame_offset -= kPointerSize;
- DoTranslateCommand(iterator, 0, output_frame_offset);
- }
-
- ASSERT(0 == output_frame_offset);
-
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-
- intptr_t handler =
- reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
- output_frame->SetRegister(eax.code(), params);
- output_frame->SetRegister(ebx.code(), handler);
-
- // Compute this frame's PC, state, and continuation.
- Code* trampoline = NULL;
- int extra = descriptor->extra_expression_stack_count_;
- StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
- ASSERT(trampoline != NULL);
- output_frame->SetPc(reinterpret_cast<intptr_t>(
- trampoline->instruction_start()));
- output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
- Code* notify_failure =
- isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(notify_failure->entry()));
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 7 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
- // Construct stub cannot be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- uint32_t pc = reinterpret_cast<uint32_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
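
Reading the SetFrameSlot writes above from top to bottom shows where the `7 * kPointerSize` fixed size comes from; a sketch of the resulting CONSTRUCT frame, highest address first (derived from this function, not a layout defined elsewhere):

    // height slots of translated parameters
    // caller's pc                        -+
    // caller's fp  <- this frame's fp     |
    // context                             |
    // CONSTRUCT marker (no function)      |- 7 fixed slots = fixed_frame_size
    // construct stub code object          |
    // argc as a smi (height - 1)          |
    // allocated receiver  <- top         -+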
-
-
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
-
- unsigned alignment_state_offset =
- input_offset - parameter_count * kPointerSize -
- StandardFrameConstants::kFixedFrameSize -
- kPointerSize;
- ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
- JavaScriptFrameConstants::kLocal0Offset);
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- uint32_t top_address;
- if (is_bottommost) {
- int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
- has_alignment_padding_ =
- (alignment_state == kAlignmentPaddingPushed) ? 1 : 0;
- // 2 = context and function in the frame.
- // If the optimized frame had alignment padding, adjust the frame pointer
- // to point to the new position of the old frame pointer after padding
- // is removed. Subtract 2 * kPointerSize for the context and function slots.
- top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
- height_in_bytes + has_alignment_padding_ * kPointerSize;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost ||
- (input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize) ==
- fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
- }
- ASSERT(!is_bottommost || !has_alignment_padding_ ||
- (fp_value & kPointerSize) != 0);
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<uint32_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(esi.code(), value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Builtins* builtins = isolate_->builtins();
- Code* continuation = (bailout_type_ == EAGER)
- ? builtins->builtin(Builtins::kNotifyDeoptimized)
- : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<uint32_t>(continuation->entry()));
- }
-}
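
The translated JAVA_SCRIPT frame follows the same pattern; a sketch of the layout the writes above produce, highest address first:

    // incoming parameters (formal parameter count + 1 slots, incl. receiver)
    // caller's pc
    // caller's fp   <- this frame's fp
    // context
    // function
    // locals and expression stack (height slots)   <- top of frame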
-
-
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers ebp and esp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
- }
-}
-
-
-#define __ masm()->
-
-void Deoptimizer::EntryGenerator::Generate() {
- GeneratePrologue();
-
- Isolate* isolate = masm()->isolate();
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kNumAllocatableRegisters;
- __ sub(esp, Immediate(kDoubleRegsSize));
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movdbl(Operand(esp, offset), xmm_reg);
- }
- }
-
- __ pushad();
-
- const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
- kDoubleRegsSize;
-
- // Get the bailout id from the stack.
- __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
-
- // Get the address of the location in the code object if possible
- // and compute the fp-to-sp delta in register edx.
- if (type() == EAGER) {
- __ Set(ecx, Immediate(0));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
- __ sub(edx, ebp);
- __ neg(edx);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6, eax);
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(Operand(esp, 0 * kPointerSize), eax); // Function.
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type.
- __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id.
- __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0.
- __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
- __ mov(Operand(esp, 5 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
- }
-
- // Preserve deoptimizer object in register eax and get the input
- // frame descriptor pointer.
- __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
-
- // Fill in the input registers.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(ebx, offset));
- }
-
- int double_regs_offset = FrameDescription::double_registers_offset();
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- // Fill in the double input registers.
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
- __ movdbl(xmm0, Operand(esp, src_offset));
- __ movdbl(Operand(ebx, dst_offset), xmm0);
- }
- }
-
- // Clear all FPU exceptions.
- // TODO(ulan): Find out why the TOP register is not zero here in some cases,
- // and check that the generated code never deoptimizes with unbalanced stack.
- __ fnclex();
-
- // Remove the bailout id and the double registers from the stack.
- if (type() == EAGER) {
- __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
- } else {
- __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
- }
-
- // Compute a pointer to the unwinding limit in register ecx; that is
- // the first stack slot not part of the input frame.
- __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
- __ add(ecx, esp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
- Label pop_loop_header;
- __ jmp(&pop_loop_header);
- Label pop_loop;
- __ bind(&pop_loop);
- __ pop(Operand(edx, 0));
- __ add(edx, Immediate(sizeof(uint32_t)));
- __ bind(&pop_loop_header);
- __ cmp(ecx, esp);
- __ j(not_equal, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(eax);
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0 * kPointerSize), eax);
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 1);
- }
- __ pop(eax);
-
- if (type() != OSR) {
- // If frame was dynamically aligned, pop padding.
- Label no_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_padding);
- __ pop(ecx);
- if (FLAG_debug_code) {
- __ cmp(ecx, Immediate(kAlignmentZapValue));
- __ Assert(equal, "alignment marker expected");
- }
- __ bind(&no_padding);
- } else {
- // If frame needs dynamic alignment push padding.
- Label no_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_padding);
- __ push(Immediate(kAlignmentZapValue));
- __ bind(&no_padding);
- }
-
- // Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
- // Outer loop state: eax = current FrameDescription**, edx = one past the
- // last FrameDescription**.
- __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
- __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
- __ lea(edx, Operand(eax, edx, times_4, 0));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
- __ mov(ebx, Operand(eax, 0));
- __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ sub(ecx, Immediate(sizeof(uint32_t)));
- __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
- __ bind(&inner_loop_header);
- __ test(ecx, ecx);
- __ j(not_zero, &inner_push_loop);
- __ add(eax, Immediate(kPointerSize));
- __ bind(&outer_loop_header);
- __ cmp(eax, edx);
- __ j(below, &outer_push_loop);
-
- // In case of OSR or a failed STUB, we have to restore the XMM registers.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movdbl(xmm_reg, Operand(ebx, src_offset));
- }
- }
-
- // Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(ebx, FrameDescription::state_offset()));
- }
- __ push(Operand(ebx, FrameDescription::pc_offset()));
- __ push(Operand(ebx, FrameDescription::continuation_offset()));
-
-
- // Push the registers from the last output frame.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(ebx, offset));
- }
-
- // Restore the registers from the stack.
- __ popad();
-
- // Return to the continuation point.
- __ ret(0);
-}
-
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ push_imm32(i);
- __ jmp(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
-}
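
Every iteration emits the same two fixed-size instructions, so entry i begins exactly i * table_entry_size_ bytes from the table base and the deoptimizer can index entries arithmetically. A sketch of the emitted sequence (displacement bytes shown as rr):

    // entry 0:  68 00 00 00 00   push 0     ; 5 bytes
    //           e9 rr rr rr rr   jmp done   ; 5 bytes -> 10 per entry
    // entry 1:  68 01 00 00 00   push 1
    //           e9 rr rr rr rr   jmp done
    // ...
    // done:     ; common deoptimization entry code follows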
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/disasm-ia32.cc b/src/3rdparty/v8/src/ia32/disasm-ia32.cc
deleted file mode 100644
index 1193f2a..0000000
--- a/src/3rdparty/v8/src/ia32/disasm-ia32.cc
+++ /dev/null
@@ -1,1728 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdarg.h>
-
-#include "v8.h"
-
-#undef CONST
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "disasm.h"
-
-namespace disasm {
-
-enum OperandOrder {
- UNSET_OP_ORDER = 0,
- REG_OPER_OP_ORDER,
- OPER_REG_OP_ORDER
-};
-
-
-//------------------------------------------------------------------
-// Tables
-//------------------------------------------------------------------
-struct ByteMnemonic {
- int b; // -1 terminates, otherwise must be in range (0..255)
- const char* mnem;
- OperandOrder op_order_;
-};
-
-
-static const ByteMnemonic two_operands_instr[] = {
- {0x01, "add", OPER_REG_OP_ORDER},
- {0x03, "add", REG_OPER_OP_ORDER},
- {0x09, "or", OPER_REG_OP_ORDER},
- {0x0B, "or", REG_OPER_OP_ORDER},
- {0x1B, "sbb", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER},
- {0x23, "and", REG_OPER_OP_ORDER},
- {0x29, "sub", OPER_REG_OP_ORDER},
- {0x2A, "subb", REG_OPER_OP_ORDER},
- {0x2B, "sub", REG_OPER_OP_ORDER},
- {0x31, "xor", OPER_REG_OP_ORDER},
- {0x33, "xor", REG_OPER_OP_ORDER},
- {0x38, "cmpb", OPER_REG_OP_ORDER},
- {0x3A, "cmpb", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER},
- {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER},
- {0x87, "xchg", REG_OPER_OP_ORDER},
- {0x8A, "mov_b", REG_OPER_OP_ORDER},
- {0x8B, "mov", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const ByteMnemonic zero_operands_instr[] = {
- {0xC3, "ret", UNSET_OP_ORDER},
- {0xC9, "leave", UNSET_OP_ORDER},
- {0x90, "nop", UNSET_OP_ORDER},
- {0xF4, "hlt", UNSET_OP_ORDER},
- {0xCC, "int3", UNSET_OP_ORDER},
- {0x60, "pushad", UNSET_OP_ORDER},
- {0x61, "popad", UNSET_OP_ORDER},
- {0x9C, "pushfd", UNSET_OP_ORDER},
- {0x9D, "popfd", UNSET_OP_ORDER},
- {0x9E, "sahf", UNSET_OP_ORDER},
- {0x99, "cdq", UNSET_OP_ORDER},
- {0x9B, "fwait", UNSET_OP_ORDER},
- {0xFC, "cld", UNSET_OP_ORDER},
- {0xAB, "stos", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const ByteMnemonic call_jump_instr[] = {
- {0xE8, "call", UNSET_OP_ORDER},
- {0xE9, "jmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const ByteMnemonic short_immediate_instr[] = {
- {0x05, "add", UNSET_OP_ORDER},
- {0x0D, "or", UNSET_OP_ORDER},
- {0x15, "adc", UNSET_OP_ORDER},
- {0x25, "and", UNSET_OP_ORDER},
- {0x2D, "sub", UNSET_OP_ORDER},
- {0x35, "xor", UNSET_OP_ORDER},
- {0x3D, "cmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-// Generally we don't want to generate these because they are subject to
-// partial register stalls. They are included for completeness, and because
-// the cmp variant is used by the RecordWrite stub. Since cmp does not update
-// the register, it is not subject to partial register stalls.
-static const ByteMnemonic byte_immediate_instr[] = {
- {0x0c, "or", UNSET_OP_ORDER},
- {0x24, "and", UNSET_OP_ORDER},
- {0x34, "xor", UNSET_OP_ORDER},
- {0x3c, "cmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const char* const jump_conditional_mnem[] = {
- /*0*/ "jo", "jno", "jc", "jnc",
- /*4*/ "jz", "jnz", "jna", "ja",
- /*8*/ "js", "jns", "jpe", "jpo",
- /*12*/ "jl", "jnl", "jng", "jg"
-};
-
-
-static const char* const set_conditional_mnem[] = {
- /*0*/ "seto", "setno", "setc", "setnc",
- /*4*/ "setz", "setnz", "setna", "seta",
- /*8*/ "sets", "setns", "setpe", "setpo",
- /*12*/ "setl", "setnl", "setng", "setg"
-};
-
-
-static const char* const conditional_move_mnem[] = {
- /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc",
- /*4*/ "cmovz", "cmovnz", "cmovna", "cmova",
- /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
- /*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"
-};
-
-
-enum InstructionType {
- NO_INSTR,
- ZERO_OPERANDS_INSTR,
- TWO_OPERANDS_INSTR,
- JUMP_CONDITIONAL_SHORT_INSTR,
- REGISTER_INSTR,
- MOVE_REG_INSTR,
- CALL_JUMP_INSTR,
- SHORT_IMMEDIATE_INSTR,
- BYTE_IMMEDIATE_INSTR
-};
-
-
-struct InstructionDesc {
- const char* mnem;
- InstructionType type;
- OperandOrder op_order_;
-};
-
-
-class InstructionTable {
- public:
- InstructionTable();
- const InstructionDesc& Get(byte x) const { return instructions_[x]; }
- static InstructionTable* get_instance() {
- static InstructionTable table;
- return &table;
- }
-
- private:
- InstructionDesc instructions_[256];
- void Clear();
- void Init();
- void CopyTable(const ByteMnemonic bm[], InstructionType type);
- void SetTableRange(InstructionType type,
- byte start,
- byte end,
- const char* mnem);
- void AddJumpConditionalShort();
-};
-
-
-InstructionTable::InstructionTable() {
- Clear();
- Init();
-}
-
-
-void InstructionTable::Clear() {
- for (int i = 0; i < 256; i++) {
- instructions_[i].mnem = "";
- instructions_[i].type = NO_INSTR;
- instructions_[i].op_order_ = UNSET_OP_ORDER;
- }
-}
-
-
-void InstructionTable::Init() {
- CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
- CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
- CopyTable(call_jump_instr, CALL_JUMP_INSTR);
- CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
- CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
- AddJumpConditionalShort();
- SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
- SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
- SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
- SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
- SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,"); // 0x90 is nop.
- SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
-}
-
-
-void InstructionTable::CopyTable(const ByteMnemonic bm[],
- InstructionType type) {
- for (int i = 0; bm[i].b >= 0; i++) {
- InstructionDesc* id = &instructions_[bm[i].b];
- id->mnem = bm[i].mnem;
- id->op_order_ = bm[i].op_order_;
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
- id->type = type;
- }
-}
-
-
-void InstructionTable::SetTableRange(InstructionType type,
- byte start,
- byte end,
- const char* mnem) {
- for (byte b = start; b <= end; b++) {
- InstructionDesc* id = &instructions_[b];
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
- id->mnem = mnem;
- id->type = type;
- }
-}
-
-
-void InstructionTable::AddJumpConditionalShort() {
- for (byte b = 0x70; b <= 0x7F; b++) {
- InstructionDesc* id = &instructions_[b];
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
- id->mnem = jump_conditional_mnem[b & 0x0F];
- id->type = JUMP_CONDITIONAL_SHORT_INSTR;
- }
-}
-
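The CopyTable/SetTableRange/AddJumpConditionalShort calls above populate a single 256-entry array, so the decoder's first dispatch is one indexed load rather than a switch, and the ASSERT_EQ(NO_INSTR, ...) checks guarantee that no two mnemonic tables claim the same opcode byte. A minimal sketch of the lookup (editorial, not part of the deleted file):

    const InstructionDesc& desc = InstructionTable::get_instance()->Get(0xC3);
    // desc.mnem == "ret", desc.type == ZERO_OPERANDS_INSTR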
-
-// The IA32 disassembler implementation.
-class DisassemblerIA32 {
- public:
- DisassemblerIA32(const NameConverter& converter,
- bool abort_on_unimplemented = true)
- : converter_(converter),
- instruction_table_(InstructionTable::get_instance()),
- tmp_buffer_pos_(0),
- abort_on_unimplemented_(abort_on_unimplemented) {
- tmp_buffer_[0] = '\0';
- }
-
- virtual ~DisassemblerIA32() {}
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
-
- private:
- const NameConverter& converter_;
- InstructionTable* instruction_table_;
- v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
- unsigned int tmp_buffer_pos_;
- bool abort_on_unimplemented_;
-
- enum {
- eax = 0,
- ecx = 1,
- edx = 2,
- ebx = 3,
- esp = 4,
- ebp = 5,
- esi = 6,
- edi = 7
- };
-
-
- enum ShiftOpcodeExtension {
- kROL = 0,
- kROR = 1,
- kRCL = 2,
- kRCR = 3,
- kSHL = 4,
- kSHR = 5,
- kSAR = 7
- };
-
-
- const char* NameOfCPURegister(int reg) const {
- return converter_.NameOfCPURegister(reg);
- }
-
-
- const char* NameOfByteCPURegister(int reg) const {
- return converter_.NameOfByteCPURegister(reg);
- }
-
-
- const char* NameOfXMMRegister(int reg) const {
- return converter_.NameOfXMMRegister(reg);
- }
-
-
- const char* NameOfAddress(byte* addr) const {
- return converter_.NameOfAddress(addr);
- }
-
-
- // Disassembler helper functions.
- static void get_modrm(byte data, int* mod, int* regop, int* rm) {
- *mod = (data >> 6) & 3;
- *regop = (data & 0x38) >> 3;
- *rm = data & 7;
- }
-
-
- static void get_sib(byte data, int* scale, int* index, int* base) {
- *scale = (data >> 6) & 3;
- *index = (data >> 3) & 7;
- *base = data & 7;
- }
-
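A worked example for the two helpers above (an editorial sketch, not part of the deleted file): the byte 0x4C is 01 001 100 in binary, so mod = 1 (the disp8 form), regop = 1 and rm = 4; rm == esp signals that a SIB byte follows.

    int mod, regop, rm;
    get_modrm(0x4C, &mod, &regop, &rm);    // mod == 1, regop == 1, rm == 4
    int scale, index, base;
    get_sib(0x24, &scale, &index, &base);  // 00 100 100: scale 0, no index, base esp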
- typedef const char* (DisassemblerIA32::*RegisterNameMapping)(int reg) const;
-
- int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
- int PrintRightOperand(byte* modrmp);
- int PrintRightByteOperand(byte* modrmp);
- int PrintRightXMMOperand(byte* modrmp);
- int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
- int PrintImmediateOp(byte* data);
- int F7Instruction(byte* data);
- int D1D3C1Instruction(byte* data);
- int JumpShort(byte* data);
- int JumpConditional(byte* data, const char* comment);
- int JumpConditionalShort(byte* data, const char* comment);
- int SetCC(byte* data);
- int CMov(byte* data);
- int FPUInstruction(byte* data);
- int MemoryFPUInstruction(int escape_opcode, int modrm_byte, byte* modrm_start);
- int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
- void AppendToBuffer(const char* format, ...);
-
-
- void UnimplementedInstruction() {
- if (abort_on_unimplemented_) {
- UNIMPLEMENTED();
- } else {
- AppendToBuffer("'Unimplemented Instruction'");
- }
- }
-};
-
-
-void DisassemblerIA32::AppendToBuffer(const char* format, ...) {
- v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
- va_list args;
- va_start(args, format);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
- va_end(args);
- tmp_buffer_pos_ += result;
-}
-
-int DisassemblerIA32::PrintRightOperandHelper(
- byte* modrmp,
- RegisterNameMapping direct_register_name) {
- int mod, regop, rm;
- get_modrm(*modrmp, &mod, &regop, &rm);
- RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
- &DisassemblerIA32::NameOfCPURegister;
- switch (mod) {
- case 0:
- if (rm == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp+1);
- AppendToBuffer("[0x%x]", disp);
- return 5;
- } else if (rm == esp) {
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- if (index == esp && base == esp && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s]", (this->*register_name)(rm));
- return 2;
- } else if (base == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
- (this->*register_name)(index),
- 1 << scale,
- disp);
- return 6;
- } else if (index != esp && base != ebp) {
- // [base+index*scale]
- AppendToBuffer("[%s+%s*%d]",
- (this->*register_name)(base),
- (this->*register_name)(index),
- 1 << scale);
- return 2;
- } else {
- UnimplementedInstruction();
- return 1;
- }
- } else {
- AppendToBuffer("[%s]", (this->*register_name)(rm));
- return 1;
- }
- break;
- case 1: // fall through
- case 2:
- if (rm == esp) {
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- int disp =
- mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
- if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
- } else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
- (this->*register_name)(base),
- (this->*register_name)(index),
- 1 << scale,
- disp);
- }
- return mod == 2 ? 6 : 3;
- } else {
- // No sib.
- int disp =
- mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
- AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
- return mod == 2 ? 5 : 2;
- }
- break;
- case 3:
- AppendToBuffer("%s", (this->*register_name)(rm));
- return 1;
- default:
- UnimplementedInstruction();
- return 1;
- }
- UNREACHABLE();
-}
-
-
-int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp, &DisassemblerIA32::NameOfCPURegister);
-}
-
-
-int DisassemblerIA32::PrintRightByteOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerIA32::NameOfByteCPURegister);
-}
-
-
-int DisassemblerIA32::PrintRightXMMOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerIA32::NameOfXMMRegister);
-}
-
-
-// Returns number of bytes used including the current *data.
-// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
-int DisassemblerIA32::PrintOperands(const char* mnem,
- OperandOrder op_order,
- byte* data) {
- byte modrm = *data;
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- int advance = 0;
- switch (op_order) {
- case REG_OPER_OP_ORDER: {
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
- advance = PrintRightOperand(data);
- break;
- }
- case OPER_REG_OP_ORDER: {
- AppendToBuffer("%s ", mnem);
- advance = PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return advance;
-}
-
-
-// Returns number of bytes used by machine instruction, including *data byte.
-// Writes immediate instructions to 'tmp_buffer_'.
-int DisassemblerIA32::PrintImmediateOp(byte* data) {
- bool sign_extension_bit = (*data & 0x02) != 0;
- byte modrm = *(data+1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- const char* mnem = "Imm???";
- switch (regop) {
- case 0: mnem = "add"; break;
- case 1: mnem = "or"; break;
- case 2: mnem = "adc"; break;
- case 4: mnem = "and"; break;
- case 5: mnem = "sub"; break;
- case 6: mnem = "xor"; break;
- case 7: mnem = "cmp"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data+1);
- if (sign_extension_bit) {
- AppendToBuffer(",0x%x", *(data + 1 + count));
- return 1 + count + 1 /*int8*/;
- } else {
- AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
- return 1 + count + 4 /*int32_t*/;
- }
-}
-
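A worked example (editorial, not part of the deleted file): opcodes 0x81 and 0x83 share the /regop extensions decoded above, and the sign-extension bit selects the immediate width. The sequence 83 E8 04 disassembles as sub eax,0x4 in 3 bytes (an 8-bit immediate, sign-extended), while 81 E8 04 00 00 00 encodes the same operation in 6 bytes with a full 32-bit immediate.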
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::F7Instruction(byte* data) {
- ASSERT_EQ(0xF7, *data);
- byte modrm = *(data+1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- if (mod == 3 && regop != 0) {
- const char* mnem = NULL;
- switch (regop) {
- case 2: mnem = "not"; break;
- case 3: mnem = "neg"; break;
- case 4: mnem = "mul"; break;
- case 5: mnem = "imul"; break;
- case 7: mnem = "idiv"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm));
- return 2;
- } else if (mod == 3 && regop == eax) {
- int32_t imm = *reinterpret_cast<int32_t*>(data+2);
- AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm);
- return 6;
- } else if (regop == eax) {
- AppendToBuffer("test ");
- int count = PrintRightOperand(data+1);
- int32_t imm = *reinterpret_cast<int32_t*>(data+1+count);
- AppendToBuffer(",0x%x", imm);
- return 1+count+4 /*int32_t*/;
- } else {
- UnimplementedInstruction();
- return 2;
- }
-}
-
-int DisassemblerIA32::D1D3C1Instruction(byte* data) {
- byte op = *data;
- ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
- byte modrm = *(data+1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- int imm8 = -1;
- int num_bytes = 2;
- if (mod == 3) {
- const char* mnem = NULL;
- switch (regop) {
- case kROL: mnem = "rol"; break;
- case kROR: mnem = "ror"; break;
- case kRCL: mnem = "rcl"; break;
- case kRCR: mnem = "rcr"; break;
- case kSHL: mnem = "shl"; break;
- case kSHR: mnem = "shr"; break;
- case kSAR: mnem = "sar"; break;
- default: UnimplementedInstruction();
- }
- if (op == 0xD1) {
- imm8 = 1;
- } else if (op == 0xC1) {
- imm8 = *(data+2);
- num_bytes = 3;
- } else if (op == 0xD3) {
- // Shift/rotate by cl.
- }
- ASSERT_NE(NULL, mnem);
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
- if (imm8 > 0) {
- AppendToBuffer("%d", imm8);
- } else {
- AppendToBuffer("cl");
- }
- } else {
- UnimplementedInstruction();
- }
- return num_bytes;
-}
-
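A worked example (editorial, not part of the deleted file): the shift group keeps the operation in the regop field of the modrm byte. C1 E0 05 has regop = kSHL and rm = eax, so it prints as shl eax,5 in 3 bytes; D3 E8 has regop = kSHR and shifts by cl, printing shr eax,cl in 2 bytes.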
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::JumpShort(byte* data) {
- ASSERT_EQ(0xEB, *data);
- byte b = *(data+1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- AppendToBuffer("jmp %s", NameOfAddress(dest));
- return 2;
-}
-
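A worked example (editorial, not part of the deleted file): the displacement byte is signed and relative to the end of the 2-byte instruction. For EB FE at address p, dest = p + (int8_t)0xFE + 2 = p, a jump to itself; EB 00 falls through to p + 2.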
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data+1) & 0x0F;
- byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
- const char* mnem = jump_conditional_mnem[cond];
- AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
- if (comment != NULL) {
- AppendToBuffer(", %s", comment);
- }
- return 6; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
- byte cond = *data & 0x0F;
- byte b = *(data+1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- const char* mnem = jump_conditional_mnem[cond];
- AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
- if (comment != NULL) {
- AppendToBuffer(", %s", comment);
- }
- return 2;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::SetCC(byte* data) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data+1) & 0x0F;
- const char* mnem = set_conditional_mnem[cond];
- AppendToBuffer("%s ", mnem);
- PrintRightByteOperand(data+2);
- return 3; // Includes 0x0F.
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::CMov(byte* data) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data + 1) & 0x0F;
- const char* mnem = conditional_move_mnem[cond];
- int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
- return 2 + op_size; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerIA32::FPUInstruction(byte* data) {
- byte escape_opcode = *data;
- ASSERT_EQ(0xD8, escape_opcode & 0xF8);
- byte modrm_byte = *(data+1);
-
- if (modrm_byte >= 0xC0) {
- return RegisterFPUInstruction(escape_opcode, modrm_byte);
- } else {
- return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
- }
-}
-
-int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
- int modrm_byte,
- byte* modrm_start) {
- const char* mnem = "?";
- int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
- switch (escape_opcode) {
- case 0xD9: switch (regop) {
- case 0: mnem = "fld_s"; break;
- case 3: mnem = "fstp_s"; break;
- case 7: mnem = "fstcw"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDB: switch (regop) {
- case 0: mnem = "fild_s"; break;
- case 1: mnem = "fisttp_s"; break;
- case 2: mnem = "fist_s"; break;
- case 3: mnem = "fistp_s"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD: switch (regop) {
- case 0: mnem = "fld_d"; break;
- case 1: mnem = "fisttp_d"; break;
- case 2: mnem = "fst_d"; break;
- case 3: mnem = "fstp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDF: switch (regop) {
- case 5: mnem = "fild_d"; break;
- case 7: mnem = "fistp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(modrm_start);
- return count + 1;
-}
-
-int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
- byte modrm_byte) {
- bool has_register = false; // Is the FPU register encoded in modrm_byte?
- const char* mnem = "?";
-
- switch (escape_opcode) {
- case 0xD8:
- UnimplementedInstruction();
- break;
-
- case 0xD9:
- switch (modrm_byte & 0xF8) {
- case 0xC0:
- mnem = "fld";
- has_register = true;
- break;
- case 0xC8:
- mnem = "fxch";
- has_register = true;
- break;
- default:
- switch (modrm_byte) {
- case 0xE0: mnem = "fchs"; break;
- case 0xE1: mnem = "fabs"; break;
- case 0xE4: mnem = "ftst"; break;
- case 0xE8: mnem = "fld1"; break;
- case 0xEB: mnem = "fldpi"; break;
- case 0xED: mnem = "fldln2"; break;
- case 0xEE: mnem = "fldz"; break;
- case 0xF0: mnem = "f2xm1"; break;
- case 0xF1: mnem = "fyl2x"; break;
- case 0xF5: mnem = "fprem1"; break;
- case 0xF7: mnem = "fincstp"; break;
- case 0xF8: mnem = "fprem"; break;
- case 0xFC: mnem = "frndint"; break;
- case 0xFD: mnem = "fscale"; break;
- case 0xFE: mnem = "fsin"; break;
- case 0xFF: mnem = "fcos"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDA:
- if (modrm_byte == 0xE9) {
- mnem = "fucompp";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDB:
- if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomi";
- has_register = true;
- } else if (modrm_byte == 0xE2) {
- mnem = "fclex";
- } else if (modrm_byte == 0xE3) {
- mnem = "fninit";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDC:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "fadd"; break;
- case 0xE8: mnem = "fsub"; break;
- case 0xC8: mnem = "fmul"; break;
- case 0xF8: mnem = "fdiv"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "ffree"; break;
- case 0xD8: mnem = "fstp"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDE:
- if (modrm_byte == 0xD9) {
- mnem = "fcompp";
- } else {
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "faddp"; break;
- case 0xE8: mnem = "fsubp"; break;
- case 0xC8: mnem = "fmulp"; break;
- case 0xF8: mnem = "fdivp"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDF:
- if (modrm_byte == 0xE0) {
- mnem = "fnstsw_ax";
- } else if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomip";
- has_register = true;
- }
- break;
-
- default: UnimplementedInstruction();
- }
-
- if (has_register) {
- AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
- } else {
- AppendToBuffer("%s", mnem);
- }
- return 2;
-}
-
-
-// Mnemonics for instruction bytes that follow a 0x0F prefix.
-// Returns NULL if the instruction is not handled here.
-static const char* F0Mnem(byte f0byte) {
- switch (f0byte) {
- case 0x18: return "prefetch";
- case 0xA2: return "cpuid";
- case 0x31: return "rdtsc";
- case 0xBE: return "movsx_b";
- case 0xBF: return "movsx_w";
- case 0xB6: return "movzx_b";
- case 0xB7: return "movzx_w";
- case 0xAF: return "imul";
- case 0xA5: return "shld";
- case 0xAD: return "shrd";
- case 0xAC: return "shrd"; // 3-operand version.
- case 0xAB: return "bts";
- default: return NULL;
- }
-}
-
-
-// Disassembles the instruction at '*instr' and writes it into 'out_buffer'.
-int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
- byte* instr) {
- tmp_buffer_pos_ = 0; // Start writing at position 0.
- byte* data = instr;
- // Check for hints.
- const char* branch_hint = NULL;
- // We use these two prefixes only with branch prediction
- if (*data == 0x3E /*ds*/) {
- branch_hint = "predicted taken";
- data++;
- } else if (*data == 0x2E /*cs*/) {
- branch_hint = "predicted not taken";
- data++;
- }
- bool processed = true; // Will be set to false if the current instruction
- // is not in the instruction table.
- const InstructionDesc& idesc = instruction_table_->Get(*data);
- switch (idesc.type) {
- case ZERO_OPERANDS_INSTR:
- AppendToBuffer(idesc.mnem);
- data++;
- break;
-
- case TWO_OPERANDS_INSTR:
- data++;
- data += PrintOperands(idesc.mnem, idesc.op_order_, data);
- break;
-
- case JUMP_CONDITIONAL_SHORT_INSTR:
- data += JumpConditionalShort(data, branch_hint);
- break;
-
- case REGISTER_INSTR:
- AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
- data++;
- break;
-
- case MOVE_REG_INSTR: {
- byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("mov %s,%s",
- NameOfCPURegister(*data & 0x07),
- NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case CALL_JUMP_INSTR: {
- byte* addr = data + *reinterpret_cast<int32_t*>(data+1) + 5;
- AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case SHORT_IMMEDIATE_INSTR: {
- byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
- AppendToBuffer("%s eax, %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case BYTE_IMMEDIATE_INSTR: {
- AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
- data += 2;
- break;
- }
-
- case NO_INSTR:
- processed = false;
- break;
-
- default:
- UNIMPLEMENTED(); // This type is not implemented.
- }
- //----------------------------
- if (!processed) {
- switch (*data) {
- case 0xC2:
- AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data+1));
- data += 3;
- break;
-
- case 0x69: // fall through
- case 0x6B:
- { int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- int32_t imm =
- *data == 0x6B ? *(data+2) : *reinterpret_cast<int32_t*>(data+2);
- AppendToBuffer("imul %s,%s,0x%x",
- NameOfCPURegister(regop),
- NameOfCPURegister(rm),
- imm);
- data += 2 + (*data == 0x6B ? 1 : 4);
- }
- break;
-
- case 0xF6:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == eax) {
- AppendToBuffer("test_b ");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- UnimplementedInstruction();
- }
- }
- break;
-
- case 0x81: // fall through
- case 0x83: // 0x81 with sign extension bit set
- data += PrintImmediateOp(data);
- break;
-
- case 0x0F:
- { byte f0byte = data[1];
- const char* f0mnem = F0Mnem(f0byte);
- if (f0byte == 0x18) {
- data += 2; // Skip the 0x0F 0x18 opcode bytes; *data is now the modrm.
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* suffix[] = {"nta", "1", "2", "3"};
- AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
- data += PrintRightOperand(data);
- } else if (f0byte == 0x1F && data[2] == 0) {
- AppendToBuffer("nop"); // 3 byte nop.
- data += 3;
- } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
- AppendToBuffer("nop"); // 4 byte nop.
- data += 4;
- } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
- data[4] == 0) {
- AppendToBuffer("nop"); // 5 byte nop.
- data += 5;
- } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
- data[4] == 0 && data[5] == 0 && data[6] == 0) {
- AppendToBuffer("nop"); // 7 byte nop.
- data += 7;
- } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
- data[4] == 0 && data[5] == 0 && data[6] == 0 &&
- data[7] == 0) {
- AppendToBuffer("nop"); // 8 byte nop.
- data += 8;
- } else if (f0byte == 0xA2 || f0byte == 0x31) {
- AppendToBuffer("%s", f0mnem);
- data += 2;
- } else if (f0byte == 0x28) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movaps %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (f0byte == 0x57) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xorps %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (f0byte == 0x50) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movmskps %s,%s",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if ((f0byte & 0xF0) == 0x80) {
- data += JumpConditional(data, branch_hint);
- } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
- f0byte == 0xB7 || f0byte == 0xAF) {
- data += 2;
- data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
- } else if ((f0byte & 0xF0) == 0x90) {
- data += SetCC(data);
- } else if ((f0byte & 0xF0) == 0x40) {
- data += CMov(data);
- } else {
- data += 2;
- if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
- // shrd, shld, bts
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- if (f0byte == 0xAB) {
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else {
- AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
- }
- } else {
- UnimplementedInstruction();
- }
- }
- }
- break;
-
- case 0x8F:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == eax) {
- AppendToBuffer("pop ");
- data += PrintRightOperand(data);
- }
- }
- break;
-
- case 0xFF:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
- switch (regop) {
- case esi: mnem = "push"; break;
- case eax: mnem = "inc"; break;
- case ecx: mnem = "dec"; break;
- case edx: mnem = "call"; break;
- case esp: mnem = "jmp"; break;
- default: mnem = "???";
- }
- AppendToBuffer("%s ", mnem);
- data += PrintRightOperand(data);
- }
- break;
-
- case 0xC7: // imm32, fall through
- case 0xC6: // imm8
- { bool is_byte = *data == 0xC6;
- data++;
- if (is_byte) {
- AppendToBuffer("%s ", "mov_b");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- AppendToBuffer("%s ", "mov");
- data += PrintRightOperand(data);
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 4;
- }
- }
- break;
-
- case 0x80:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
- switch (regop) {
- case 5: mnem = "subb"; break;
- case 7: mnem = "cmpb"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- }
- break;
-
- case 0x88: // 8bit, fall through
- case 0x89: // 32bit
- { bool is_byte = *data == 0x88;
- int mod, regop, rm;
- data++;
- get_modrm(*data, &mod, &regop, &rm);
- if (is_byte) {
- AppendToBuffer("%s ", "mov_b");
- data += PrintRightByteOperand(data);
- AppendToBuffer(",%s", NameOfByteCPURegister(regop));
- } else {
- AppendToBuffer("%s ", "mov");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- }
- }
- break;
-
- case 0x66: // prefix
- while (*data == 0x66) data++;
- if (*data == 0x0F && data[1] == 0x1F) {
- AppendToBuffer("nop"); // 0x66-prefixed multi-byte nop.
- } else if (*data == 0x90) {
- AppendToBuffer("nop"); // 0x66 0x90: xchg ax,ax, a 2-byte nop.
- } else if (*data == 0x8B) {
- data++;
- data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
- } else if (*data == 0x89) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("mov_w ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else if (*data == 0x0F) {
- data++;
- if (*data == 0x38) {
- data++;
- if (*data == 0x17) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("ptest %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x2A) {
- // movntdqa
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else {
- UnimplementedInstruction();
- }
- } else if (*data == 0x3A) {
- data++;
- if (*data == 0x0B) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("roundsd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x16) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pextrd %s,%s,%d",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x17) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("extractps %s,%s,%d",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x22) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pinsrd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfCPURegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else {
- UnimplementedInstruction();
- }
- } else if (*data == 0x2E || *data == 0x2F) {
- const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (mod == 0x3) {
- AppendToBuffer("%s %s,%s", mnem,
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- }
- } else if (*data == 0x50) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movmskpd %s,%s",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x54) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("andpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x56) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("orpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x57) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xorpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x6E) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movd %s,", NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else if (*data == 0x6F) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (*data == 0x70) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("pshufd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0x76) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pcmpeqd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x90) {
- data++;
- AppendToBuffer("nop"); // 2 byte nop.
- } else if (*data == 0xF3) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("psllq %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x73) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- ASSERT(regop == esi || regop == edx);
- AppendToBuffer("%s %s,%d",
- (regop == esi) ? "psllq" : "psrlq",
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
- data += 2;
- } else if (*data == 0xD3) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("psrlq %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0x7F) {
- AppendToBuffer("movdqa ");
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (*data == 0x7E) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movd ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (*data == 0xDB) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pand %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0xE7) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (mod == 3) {
- AppendToBuffer("movntdq ");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- UnimplementedInstruction();
- }
- } else if (*data == 0xEF) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("pxor %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (*data == 0xEB) {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("por %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else {
- UnimplementedInstruction();
- }
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xFE:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == ecx) {
- AppendToBuffer("dec_b ");
- data += PrintRightOperand(data);
- } else {
- UnimplementedInstruction();
- }
- }
- break;
-
- case 0x68:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data+1));
- data += 5;
- break;
-
- case 0x6A:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
- data += 2;
- break;
-
- case 0xA8:
- AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data+1));
- data += 2;
- break;
-
- case 0xA9:
- AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
- data += 5;
- break;
-
- case 0xD1: // fall through
- case 0xD3: // fall through
- case 0xC1:
- data += D1D3C1Instruction(data);
- break;
-
- case 0xD9: // fall through
- case 0xDA: // fall through
- case 0xDB: // fall through
- case 0xDC: // fall through
- case 0xDD: // fall through
- case 0xDE: // fall through
- case 0xDF:
- data += FPUInstruction(data);
- break;
-
- case 0xEB:
- data += JumpShort(data);
- break;
-
- case 0xF2:
- if (*(data+1) == 0x0F) {
- byte b2 = *(data+2);
- if (b2 == 0x11) {
- AppendToBuffer("movsd ");
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (b2 == 0x10) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x5A) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvtsd2ss %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else {
- const char* mnem = "?";
- switch (b2) {
- case 0x2A: mnem = "cvtsi2sd"; break;
- case 0x2C: mnem = "cvttsd2si"; break;
- case 0x2D: mnem = "cvtsd2si"; break;
- case 0x51: mnem = "sqrtsd"; break;
- case 0x58: mnem = "addsd"; break;
- case 0x59: mnem = "mulsd"; break;
- case 0x5C: mnem = "subsd"; break;
- case 0x5E: mnem = "divsd"; break;
- }
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (b2 == 0x2A) {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
- } else if (b2 == 0x2C || b2 == 0x2D) {
- AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0xC2) {
- // Intel manual 2A, Table 3-18.
- const char* const pseudo_op[] = {
- "cmpeqsd",
- "cmpltsd",
- "cmplesd",
- "cmpunordsd",
- "cmpneqsd",
- "cmpnltsd",
- "cmpnlesd",
- "cmpordsd"
- };
- AppendToBuffer("%s %s,%s",
- pseudo_op[data[1]],
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data += 2;
- } else {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- }
- }
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xF3:
- if (*(data+1) == 0x0F) {
- byte b2 = *(data+2);
- if (b2 == 0x11) {
- AppendToBuffer("movss ");
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else if (b2 == 0x10) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movss %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x2C) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvttss2si %s,", NameOfCPURegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x5A) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x6F) {
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (b2 == 0x7F) {
- AppendToBuffer("movdqu ");
- data += 3;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- UnimplementedInstruction();
- }
- } else if (*(data+1) == 0xA5) {
- data += 2;
- AppendToBuffer("rep_movs");
- } else if (*(data+1) == 0xAB) {
- data += 2;
- AppendToBuffer("rep_stos");
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xF7:
- data += F7Instruction(data);
- break;
-
- default:
- UnimplementedInstruction();
- }
- }
-
- if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
- tmp_buffer_[tmp_buffer_pos_] = '\0';
- }
-
- int instr_len = data - instr;
- if (instr_len == 0) {
- printf("%02x", *data);
- }
- ASSERT(instr_len > 0); // Ensure progress.
-
- int outp = 0;
- // Instruction bytes.
- for (byte* bp = instr; bp < data; bp++) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp,
- "%02x",
- *bp);
- }
- for (int i = 6 - instr_len; i >= 0; i--) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp,
- " ");
- }
-
- outp += v8::internal::OS::SNPrintF(out_buffer + outp,
- " %s",
- tmp_buffer_.start());
- return instr_len;
-} // NOLINT (function is too long)
-
-
-//------------------------------------------------------------------------------
-
-
-static const char* cpu_regs[8] = {
- "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
-};
-
-
-static const char* byte_cpu_regs[8] = {
- "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
-};
-
-
-static const char* xmm_regs[8] = {
- "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
-};
-
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- if (0 <= reg && reg < 8) return cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
- if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
- if (0 <= reg && reg < 8) return xmm_regs[reg];
- return "noxmmreg";
-}
-
-
-const char* NameConverter::NameInCode(byte* addr) const {
- // IA32 does not embed debug strings at the moment.
- UNREACHABLE();
- return "";
-}
-
-
-//------------------------------------------------------------------------------
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) {}
-
-
-Disassembler::~Disassembler() {}
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte* instruction) {
- DisassemblerIA32 d(converter_, false /*do not crash if unimplemented*/);
- return d.InstructionDecode(buffer, instruction);
-}
-
-
-// The IA-32 assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
-
-
-/*static*/ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
- NameConverter converter;
- Disassembler d(converter);
- for (byte* pc = begin; pc < end;) {
- v8::internal::EmbeddedVector<char, 128> buffer;
- buffer[0] = '\0';
- byte* prev_pc = pc;
- pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p", prev_pc);
- fprintf(f, " ");
-
- for (byte* bp = prev_pc; bp < pc; bp++) {
- fprintf(f, "%02x", *bp);
- }
- for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
- fprintf(f, " ");
- }
- fprintf(f, " %s\n", buffer.start());
- }
-}
-
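A minimal usage sketch of the public interface above (editorial, not part of the deleted file; `code` and `code_end` stand in for a real instruction range):

    disasm::NameConverter converter;
    disasm::Disassembler d(converter);
    v8::internal::EmbeddedVector<char, 128> buffer;
    for (byte* pc = code; pc < code_end;) {
      int length = d.InstructionDecode(buffer, pc);  // Bytes consumed.
      printf("%p  %s\n", pc, buffer.start());
      pc += length;
    }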
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/frames-ia32.cc b/src/3rdparty/v8/src/ia32/frames-ia32.cc
deleted file mode 100644
index dd44f0e..0000000
--- a/src/3rdparty/v8/src/ia32/frames-ia32.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/frames-ia32.h b/src/3rdparty/v8/src/ia32/frames-ia32.h
deleted file mode 100644
index 5bd102a..0000000
--- a/src/3rdparty/v8/src/ia32/frames-ia32.h
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_FRAMES_IA32_H_
-#define V8_IA32_FRAMES_IA32_H_
-
-namespace v8 {
-namespace internal {
-
-
-// Register lists.
-// Note that the bit values must match those used in actual instruction
-// encoding.
-const int kNumRegs = 8;
-
-
-// Caller-saved registers
-const RegList kJSCallerSaved =
- 1 << 0 | // eax
- 1 << 1 | // ecx
- 1 << 2 | // edx
- 1 << 3 | // ebx - used as a caller-saved register in JavaScript code
- 1 << 7; // edi - callee function
-
-const int kNumJSCallerSaved = 5;
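A quick check of the mask above (an editorial note, not part of the deleted file): bits 0-3 and 7 are set, i.e. eax, ecx, edx, ebx and edi, which is the five registers recorded by kNumJSCallerSaved.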
-
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
-
-// Number of registers for which space is reserved in safepoints.
-const int kNumSafepointRegisters = 8;
-
-const int kNoAlignmentPadding = 0;
-const int kAlignmentPaddingPushed = 2;
-const int kAlignmentZapValue = 0x12345678; // Not heap object tagged.
-
-// ----------------------------------------------------
-
-
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
-class EntryFrameConstants : public AllStatic {
- public:
- static const int kCallerFPOffset = -6 * kPointerSize;
-
- static const int kFunctionArgOffset = +3 * kPointerSize;
- static const int kReceiverArgOffset = +4 * kPointerSize;
- static const int kArgcOffset = +5 * kPointerSize;
- static const int kArgvOffset = +6 * kPointerSize;
-};
-
-
-class ExitFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
-
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
-
- // FP-relative displacement of the caller's SP. It points just
- // below the saved PC.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
-};
-
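A worked reading of the constants above (editorial, not part of the deleted file): with kPointerSize == 4, the code slot lives at fp-8 and the saved sp at fp-4, the caller's ebp is at fp+0, the return address at fp+4, and the caller's sp is the address fp+8 itself. ExitFrame::ComputeStackPointer() in frames-ia32.cc is exactly the fp-4 load:

    Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);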
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-
- static const int kDynamicAlignmentStateOffset = kLocal0Offset;
-};
-
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_FRAMES_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc b/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
deleted file mode 100644
index 733d977..0000000
--- a/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
+++ /dev/null
@@ -1,4595 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- ASSERT(patch_site_.is_bound() == info_emitted_);
- }
-
- void EmitJumpIfNotSmi(Register reg,
- Label* target,
- Label::Distance distance = Label::kFar) {
- __ test(reg, Immediate(kSmiTagMask));
- EmitJump(not_carry, target, distance); // Always taken before patched.
- }
-
- void EmitJumpIfSmi(Register reg,
- Label* target,
- Label::Distance distance = Label::kFar) {
- __ test(reg, Immediate(kSmiTagMask));
- EmitJump(carry, target, distance); // Never taken before patched.
- }
-
- void EmitPatchInfo() {
- if (patch_site_.is_bound()) {
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
- __ test(eax, Immediate(delta_to_patch_site));
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- } else {
- __ nop(); // Signals no inlined code.
- }
- }
-
- private:
- // jc will be patched with jz, and jnc with jnz.
- void EmitJump(Condition cc, Label* target, Label::Distance distance) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- ASSERT(cc == carry || cc == not_carry);
- __ bind(&patch_site_);
- __ j(cc, target, distance);
- }
-
- MacroAssembler* masm_;
- Label patch_site_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
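A minimal usage sketch (editorial, not part of the deleted file; eax and the stub_call label are hypothetical): a patch site brackets an inlined smi check so the runtime can later flip the jc/jnc into jz/jnz and redirect the fast path.

    JumpPatchSite patch_site(masm_);
    patch_site.EmitJumpIfNotSmi(eax, &stub_call, Label::kNear);
    // ... inlined smi fast path ...
    patch_site.EmitPatchInfo();  // Records the delta the patcher needs.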
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right, with the
-// return address on top of them. The actual argument count matches the
-// formal parameter count expected by the function.
-//
-// The live registers are:
-// o edi: the JS function object being called (i.e. ourselves)
-// o esi: our context
-// o ebp: our caller's frame pointer
-// o esp: stack pointer (pointing to return address)
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-ia32.h for its layout.
-void FullCodeGenerator::Generate() {
- CompilationInfo* info = info_;
- handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ function compiled by full code generator");
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). ecx is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
- Label ok;
- __ test(ecx, ecx);
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
- __ mov(ecx, Operand(esp, receiver_offset));
- __ JumpIfSmi(ecx, &ok);
- __ CmpObjectType(ecx, JS_GLOBAL_PROXY_TYPE, ecx);
- __ j(not_equal, &ok, Label::kNear);
- __ mov(Operand(esp, receiver_offset),
- Immediate(isolate()->factory()->undefined_value()));
- __ bind(&ok);
- }
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- info->set_prologue_offset(masm_->pc_offset());
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = info->scope()->num_stack_slots();
- if (locals_count == 1) {
- __ push(Immediate(isolate()->factory()->undefined_value()));
- } else if (locals_count > 1) {
- __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
- for (int i = 0; i < locals_count; i++) {
- __ push(eax);
- }
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment cmnt(masm_, "[ Allocate context");
- // Argument to NewContext is the function, which is still in edi.
- __ push(edi);
- if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
- __ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- function_in_register = false;
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved on the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
-
- // Copy parameters into context if necessary.
- int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ mov(eax, Operand(ebp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(var->index());
- __ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers eax and ebx.
- __ RecordWriteContextSlot(esi,
- context_offset,
- eax,
- ebx,
- kDontSaveFPRegs);
- }
- }
- }
-
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Function uses arguments object.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(edi);
- } else {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ lea(edx,
- Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(edx);
- __ push(Immediate(Smi::FromInt(num_parameters)));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
- }
- ArgumentsAccessStub stub(type);
- __ CallStub(&stub);
-
- SetVar(arguments, eax, ebx, edx);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
- ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
- VisitVariableDeclaration(function);
- }
- VisitDeclarations(scope()->declarations());
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ mov(eax, isolate()->factory()->undefined_value());
- EmitReturnSequence();
- }
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ Set(eax, Immediate(Smi::FromInt(0)));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ mov(ebx, Immediate(profiling_counter_));
- __ sub(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(Smi::FromInt(delta)));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
- int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing: if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- __ mov(ebx, Immediate(profiling_counter_));
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(Smi::FromInt(reset_value)));
-}
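-
-// Taken together with their callers, the two helpers above implement
-// roughly this counter protocol (a sketch with invented names, not code
-// from this file):
-//
-//   cell.value -= delta;                // EmitProfilingCounterDecrement
-//   if (cell.value < 0) {               // callers branch on the sign
-//     InterruptOrOptimize();            // e.g. InterruptStub
-//     cell.value = reset_value;         // EmitProfilingCounterReset
-//   }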
-
-
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Back edge bookkeeping");
- Label ok;
-
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
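- // Worked example with an illustrative unit: if kBackEdgeDistanceUnit
- // were 100 bytes, a loop body that emitted 350 bytes since the back
- // edge target would get weight Min(kMaxBackEdgeWeight, 3), so larger
- // loop bodies drain the interrupt budget proportionally faster.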
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
-
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
-
- // Loop stack checks can be patched to perform on-stack replacement. In
- // order to decide whether or not to perform OSR we embed the loop depth
- // in a test instruction after the call so we can extract it from the OSR
- // builtin.
- ASSERT(loop_depth() > 0);
- __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
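- // E.g. at loop depth 2 this emits 'test eax, 2'. The flags it sets are
- // never consumed; the immediate is only data that the OSR machinery
- // reads back out of the unoptimized instruction stream.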
-
- EmitProfilingCounterReset();
-
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- } else {
- // Common return label.
- __ bind(&return_label_);
- if (FLAG_trace) {
- __ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- __ push(eax);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- InterruptStub stub;
- __ CallStub(&stub);
- }
- __ pop(eax);
- EmitProfilingCounterReset();
- __ bind(&ok);
- }
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- SetSourcePosition(function()->end_position() - 1);
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ mov(esp, ebp);
- __ pop(ebp);
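- // 'mov esp, ebp' (2 bytes), 'pop ebp' (1 byte) and the 'ret n' below
- // (3 bytes) make up the 6-byte window (kJSReturnSequenceLength) that
- // the debugger can overwrite with a call to its break handler.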
-
- int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
- __ Ret(arguments_bytes, ecx);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- MemOperand operand = codegen()->VarOperand(var, result_register());
- // Memory operands can be pushed directly.
- __ push(operand);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- // For simplicity we always test the accumulator register.
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on IA32.
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on IA32.
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on IA32.
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- UNREACHABLE(); // Not used on IA32.
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- if (lit->IsSmi()) {
- __ SafeSet(result_register(), Immediate(lit));
- } else {
- __ Set(result_register(), Immediate(lit));
- }
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- if (lit->IsSmi()) {
- __ SafePush(Immediate(lit));
- } else {
- __ push(Immediate(lit));
- }
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ mov(result_register(), lit);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ mov(Operand(esp, 0), reg);
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
- __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ mov(result_register(), isolate()->factory()->true_value());
- __ jmp(&done, Label::kNear);
- __ bind(materialize_false);
- __ mov(result_register(), isolate()->factory()->false_value());
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ push(Immediate(isolate()->factory()->true_value()));
- __ jmp(&done, Label::kNear);
- __ bind(materialize_false);
- __ push(Immediate(isolate()->factory()->false_value()));
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Handle<Object> value = flag
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value();
- __ mov(result_register(), value);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Handle<Object> value = flag
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value();
- __ push(Immediate(value));
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- }
-}
-
-
-void FullCodeGenerator::DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ push(result_register());
- __ CallStub(&stub, condition->test_id());
- __ test(result_register(), result_register());
- // The stub returns nonzero for true.
- Split(not_zero, if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cc,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ j(cc, if_true);
- } else if (if_true == fall_through) {
- __ j(NegateCondition(cc), if_false);
- } else {
- __ j(cc, if_true);
- __ jmp(if_false);
- }
-}
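-
-// For example, Split(equal, t, f, f) emits only 'j(equal, t)' and falls
-// through into the false case, while Split(equal, t, f, t) emits the
-// negated 'j(not_equal, f)'. Two jumps are emitted only when neither
-// target is the fall-through.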
-
-
-MemOperand FullCodeGenerator::StackOperand(Variable* var) {
- ASSERT(var->IsStackAllocated());
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -var->index() * kPointerSize;
- // Adjust by a (parameter or local) base offset.
- if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
- } else {
- offset += JavaScriptFrameConstants::kLocal0Offset;
- }
- return Operand(ebp, offset);
-}
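-
-// Worked example with kPointerSize == 4 and two parameters: parameter 0
-// resolves to Operand(ebp, 12) and parameter 1 to Operand(ebp, 8), just
-// above the return address and saved frame pointer, while locals grow
-// downwards from JavaScriptFrameConstants::kLocal0Offset.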
-
-
-MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- if (var->IsContextSlot()) {
- int context_chain_length = scope()->ContextChainLength(var->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
- } else {
- return StackOperand(var);
- }
-}
-
-
-void FullCodeGenerator::GetVar(Register dest, Variable* var) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- MemOperand location = VarOperand(var, dest);
- __ mov(dest, location);
-}
-
-
-void FullCodeGenerator::SetVar(Variable* var,
- Register src,
- Register scratch0,
- Register scratch1) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- ASSERT(!scratch0.is(src));
- ASSERT(!scratch0.is(scratch1));
- ASSERT(!scratch1.is(src));
- MemOperand location = VarOperand(var, scratch0);
- __ mov(location, src);
-
- // Emit the write barrier code if the location is in the heap.
- if (var->IsContextSlot()) {
- int offset = Context::SlotOffset(var->index());
- ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
- __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
- Label skip;
- if (should_normalize) __ jmp(&skip, Label::kNear);
- PrepareForBailout(expr, TOS_REG);
- if (should_normalize) {
- __ cmp(eax, isolate()->factory()->true_value());
- Split(equal, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
- // Check that we're not inside a with or catch context.
- __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
- __ cmp(ebx, isolate()->factory()->with_context_map());
- __ Check(not_equal, "Declaration in with context.");
- __ cmp(ebx, isolate()->factory()->catch_context_map());
- __ Check(not_equal, "Declaration in catch context.");
- }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
- VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
- Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
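- // Hole-initialization is what makes premature reads detectable, e.g.
- // (roughly, in harmony mode):
- //   { f(); let x = 1; function f() { return x; } }
- // must throw on the read of 'x', so the slot is pre-filled with the
- // hole and use sites that need the check compare against it.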
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(), zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- break;
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ mov(StackOperand(variable),
- Immediate(isolate()->factory()->the_hole_value()));
- }
- break;
-
- case Variable::CONTEXT:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ mov(ContextOperand(esi, variable->index()),
- Immediate(isolate()->factory()->the_hole_value()));
- // No write barrier since the hole value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- }
- break;
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ push(esi);
- __ push(Immediate(variable->name()));
- // VariableDeclaration nodes are always introduced in one of four modes.
- ASSERT(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ push(Immediate(Smi::FromInt(attr)));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ push(Immediate(isolate()->factory()->the_hole_value()));
- } else {
- __ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script());
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_->Add(function, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- VisitForAccumulatorValue(declaration->fun());
- __ mov(StackOperand(variable), result_register());
- break;
- }
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- VisitForAccumulatorValue(declaration->fun());
- __ mov(ContextOperand(esi, variable->index()), result_register());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(esi,
- Context::SlotOffset(variable->index()),
- result_register(),
- ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- break;
- }
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(esi);
- __ push(Immediate(variable->name()));
- __ push(Immediate(Smi::FromInt(NONE)));
- VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
-
- // Load instance object.
- __ LoadContext(eax, scope_->ContextChainLength(scope_->GlobalScope()));
- __ mov(eax, ContextOperand(eax, variable->interface()->Index()));
- __ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX));
-
- // Assign it.
- __ mov(ContextOperand(esi, variable->index()), eax);
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(esi,
- Context::SlotOffset(variable->index()),
- eax,
- ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
-
- // Traverse into body.
- Visit(declaration->module());
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- __ push(esi); // The context is the first argument.
- __ Push(pairs);
- __ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->Unuse();
-
- // The default is not a test, but remember it as the final fall-through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ mov(edx, Operand(esp, 0)); // Switch value.
- bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
-
- __ cmp(edx, eax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target());
- __ bind(&slow_case);
- }
-
- // Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
- patch_site.EmitPatchInfo();
- __ test(eax, eax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ jmp(nested_statement.break_label());
- } else {
- __ jmp(default_clause->body_target());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- Comment cmnt(masm_, "[ ForInStatement");
- SetStatementPosition(stmt);
-
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
- VisitForAccumulatorValue(stmt->enumerable());
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, &exit);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
- // Convert the object to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(eax, &convert, Label::kNear);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &done_convert, Label::kNear);
- __ bind(&convert);
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
- __ push(eax);
-
- // Check for proxies.
- Label call_runtime, use_cache, fixed_array;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- __ j(below_equal, &call_runtime);
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- __ CheckEnumCache(&call_runtime);
-
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ jmp(&use_cache, Label::kNear);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(eax);
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->meta_map());
- __ j(not_equal, &fixed_array);
-
-
- // We got a map in register eax. Get the enumeration cache from it.
- Label no_descriptors;
- __ bind(&use_cache);
-
- __ EnumLength(edx, eax);
- __ cmp(edx, Immediate(Smi::FromInt(0)));
- __ j(equal, &no_descriptors);
-
- __ LoadInstanceDescriptors(eax, ecx);
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheOffset));
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Set up the four remaining stack slots.
- __ push(eax); // Map.
- __ push(ecx); // Enumeration cache.
- __ push(edx); // Number of valid entries for the map in the enum cache.
- __ push(Immediate(Smi::FromInt(0))); // Initial index.
- __ jmp(&loop);
-
- __ bind(&no_descriptors);
- __ add(esp, Immediate(kPointerSize));
- __ jmp(&exit);
-
- // We got a fixed array in register eax. Iterate through that.
- Label non_proxy;
- __ bind(&fixed_array);
-
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(ebx, cell);
- __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
- Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
-
- __ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
- __ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
- __ j(above, &non_proxy);
- __ mov(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
- __ push(ebx); // Smi
- __ push(eax); // Array
- __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- __ push(eax); // Fixed array length (as smi).
- __ push(Immediate(Smi::FromInt(0))); // Initial index.
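-
- // Either way the loop below sees five stack slots:
- //   esp[0]  : current index (smi)
- //   esp[4]  : length of the array or enum cache (smi)
- //   esp[8]  : fixed array or enum cache
- //   esp[12] : map of the enumerable, or a smi marker in the slow case
- //   esp[16] : the enumerable object itself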
-
- // Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ bind(&loop);
- __ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index.
- __ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length.
- __ j(above_equal, loop_statement.break_label());
-
- // Get the current entry of the array into register ebx.
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
-
- // Get the expected map from the stack or a smi in the
- // permanent slow case into register edx.
- __ mov(edx, Operand(esp, 3 * kPointerSize));
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we may have to filter the key.
- Label update_each;
- __ mov(ecx, Operand(esp, 4 * kPointerSize));
- __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ j(equal, &update_each, Label::kNear);
-
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- ASSERT(Smi::FromInt(0) == 0);
- __ test(edx, edx);
- __ j(zero, &update_each);
-
- // Convert the entry to a string or null if it isn't a property
- // anymore. If the property has been removed while iterating, we
- // just skip it.
- __ push(ecx); // Enumerable.
- __ push(ebx); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ test(eax, eax);
- __ j(equal, loop_statement.continue_label());
- __ mov(ebx, eax);
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register ebx.
- __ bind(&update_each);
- __ mov(result_register(), ebx);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each());
- }
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for going to the next element by incrementing the
- // index (smi) stored on top of the stack.
- __ bind(loop_statement.continue_label());
- __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
-
- EmitBackEdgeBookkeeping(stmt, &loop);
- __ jmp(&loop);
-
- // Remove the pointers stored on the stack.
- __ bind(loop_statement.break_label());
- __ add(esp, Immediate(5 * kPointerSize));
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(&exit);
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
- __ push(Immediate(info));
- __ CallStub(&stub);
- } else {
- __ push(esi);
- __ push(Immediate(info));
- __ push(Immediate(pretenure
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value()));
- __ CallRuntime(Runtime::kNewClosure, 3);
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
- TypeofState typeof_state,
- Label* slow) {
- Register context = esi;
- Register temp = edx;
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
- }
- // Load next context in chain.
- __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering esi.
- context = temp;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s != NULL && s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(temp)) {
- __ mov(temp, context);
- }
- __ bind(&next);
- // Terminate at native context.
- __ cmp(FieldOperand(temp, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->native_context_map()));
- __ j(equal, &fast, Label::kNear);
- // Check that extension is NULL.
- __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
- // Load next context in chain.
- __ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
- __ jmp(&next);
- __ bind(&fast);
- }
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- __ mov(edx, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(ecx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- CallIC(ic, mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- ASSERT(var->IsContextSlot());
- Register context = esi;
- Register temp = ebx;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
- }
- __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering esi.
- context = temp;
- }
- }
- // Check that last extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an esi-based operand (the write barrier cannot be allowed to
- // destroy the esi register).
- return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
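- // An illustrative case (sketch):
- //   var x = 1;
- //   function f(src) { eval(src); return x; }
- // Inside f, 'x' is roughly DYNAMIC_GLOBAL: unless the eval actually
- // introduced a shadowing 'x', the fast path below loads straight from
- // the global object instead of calling Runtime::kLoadContextSlot.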
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
- __ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, done);
- if (local->mode() == CONST) {
- __ mov(eax, isolate()->factory()->undefined_value());
- } else { // LET || CONST_HARMONY
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
- }
- __ jmp(done);
- }
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
- // Record position before possible IC call.
- SetSourcePosition(proxy->position());
- Variable* var = proxy->var();
-
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
- switch (var->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in ecx and the global
- // object in eax.
- __ mov(edx, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(ecx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- context()->Plug(eax);
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP always holds.
- ASSERT(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else {
- // Check that we always have a valid source position.
- ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
- ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- Label done;
- GetVar(eax, var);
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else {
- // Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
- __ mov(eax, isolate()->factory()->undefined_value());
- }
- __ bind(&done);
- context()->Plug(eax);
- break;
- }
- }
- context()->Plug(var);
- break;
- }
-
- case Variable::LOOKUP: {
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
- __ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ bind(&done);
- context()->Plug(eax);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // edi = JS function.
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, isolate()->factory()->undefined_value());
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->pattern()));
- __ push(Immediate(expr->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
- if (expression == NULL) {
- __ push(Immediate(isolate()->factory()->null_value()));
- } else {
- VisitForStackValue(expression);
- }
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
- int flags = expr->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= expr->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- } else {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
- __ mov(ecx, Immediate(constant_properties));
- __ mov(edx, Immediate(Smi::FromInt(flags)));
- FastCloneShallowObjectStub stub(properties_count);
- __ CallStub(&stub);
- }
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in eax.
- bool result_saved = false;
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
- AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(eax); // Save result on the stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsInternalizedString()) {
- if (property->emit_store()) {
- VisitForAccumulatorValue(value);
- __ mov(ecx, Immediate(key->handle()));
- __ mov(edx, Operand(esp, 0));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
- } else {
- VisitForEffect(value);
- }
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(esp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
- __ CallRuntime(Runtime::kSetProperty, 4);
- } else {
- __ Drop(3);
- }
- break;
- case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
- break;
- case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
- break;
- }
- }
-
- // Emit code to define accessors, using only a single call to the runtime for
- // each pair of corresponding getters and setters.
- for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end();
- ++it) {
- __ push(Operand(esp, 0)); // Duplicate receiver.
- VisitForStackValue(it->first);
- EmitAccessor(it->second->getter);
- EmitAccessor(it->second->setter);
- __ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
- }
-
- if (expr->has_function()) {
- ASSERT(result_saved);
- __ push(Operand(esp, 0));
- __ CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(eax);
- }
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
- Handle<FixedArray> constant_elements = expr->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_constant_fast_elements =
- IsFastObjectElementsKind(constant_elements_kind);
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(constant_elements->get(1)));
-
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_elements));
- Heap* heap = isolate()->heap();
- if (has_constant_fast_elements &&
- constant_elements_values->map() == heap->fixed_cow_array_map()) {
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
- length);
- __ CallStub(&stub);
- } else if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
-
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- if (has_constant_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
-
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- __ CallStub(&stub);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(eax);
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- if (IsFastObjectElementsKind(constant_elements_kind)) {
- // Fast-case array literals with ElementsKind of FAST_*_ELEMENTS cannot
- // transition, so there is no need to call the runtime stub.
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- // Store the subexpression value in the array's elements.
- __ mov(FieldOperand(ebx, offset), result_register());
- // Update the write barrier for the array store.
- __ RecordWriteField(ebx, offset, result_register(), ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- } else {
- // Store the subexpression value in the array's elements.
- __ mov(ebx, Operand(esp, 0)); // Copy of array literal.
- __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
- __ mov(ecx, Immediate(Smi::FromInt(i)));
- __ mov(edx, Immediate(Smi::FromInt(expr->literal_index())));
- StoreArrayLiteralElementStub stub;
- __ CallStub(&stub);
- }
-
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(eax);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in edx.
- VisitForStackValue(property->obj());
- __ mov(edx, Operand(esp, 0));
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case KEYED_PROPERTY: {
- if (expr->is_compound()) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- __ mov(edx, Operand(esp, kPointerSize)); // Object.
- __ mov(ecx, Operand(esp, 0)); // Key.
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- break;
- }
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- AccumulatorValueContext result_context(this);
- { AccumulatorValueContext left_operand_context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ push(eax); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
- SetSourcePosition(expr->position() + 1);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- mode,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
- }
-
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- ASSERT(!key->handle()->IsSmi());
- __ mov(ecx, Immediate(key->handle()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left,
- Expression* right) {
- // Do combined smi check of the operands. Left operand is on the
- // stack. Right operand is in eax.
- Label smi_case, done, stub_call;
- __ pop(edx);
- __ mov(ecx, eax);
- __ or_(eax, edx);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
-
- __ bind(&stub_call);
- __ mov(eax, ecx);
- BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done, Label::kNear);
-
- // Smi case.
- __ bind(&smi_case);
- __ mov(eax, edx); // Copy left operand in case of a stub call.
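-
- // Reminder of the ia32 smi encoding assumed by this switch: a smi is
- // the 31-bit integer shifted left by one with a zero tag bit, so the
- // smi 5 is stored as binary 1010. ADD, SUB and the bitwise ops can
- // work on tagged values directly; the shifts and MUL untag first.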
-
- switch (op) {
- case Token::SAR:
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ sar_cl(eax); // No check of the result is necessary.
- __ SmiTag(eax);
- break;
- case Token::SHL: {
- Label result_ok;
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ shl_cl(eax);
- // Check that the *signed* result fits in a smi.
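- // The single unsigned compare below accepts exactly the words whose
- // top two bits agree, i.e. signed results in [-2^30, 2^30 - 1].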
- __ cmp(eax, 0xc0000000);
- __ j(positive, &result_ok);
- __ SmiTag(ecx);
- __ jmp(&stub_call);
- __ bind(&result_ok);
- __ SmiTag(eax);
- break;
- }
- case Token::SHR: {
- Label result_ok;
- __ SmiUntag(eax);
- __ SmiUntag(ecx);
- __ shr_cl(eax);
- __ test(eax, Immediate(0xc0000000));
- __ j(zero, &result_ok);
- __ SmiTag(ecx);
- __ jmp(&stub_call);
- __ bind(&result_ok);
- __ SmiTag(eax);
- break;
- }
- case Token::ADD:
- __ add(eax, ecx);
- __ j(overflow, &stub_call);
- break;
- case Token::SUB:
- __ sub(eax, ecx);
- __ j(overflow, &stub_call);
- break;
- case Token::MUL: {
- __ SmiUntag(eax);
- __ imul(eax, ecx);
- __ j(overflow, &stub_call);
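- // A zero product may stand for -0 (e.g. -3 * 0), which is not
- // representable as a smi; the sign check on the original operands
- // below sends that case to the stub.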
- __ test(eax, eax);
- __ j(not_zero, &done, Label::kNear);
- __ mov(ebx, edx);
- __ or_(ebx, ecx);
- __ j(negative, &stub_call);
- break;
- }
- case Token::BIT_OR:
- __ or_(eax, ecx);
- break;
- case Token::BIT_AND:
- __ and_(eax, ecx);
- break;
- case Token::BIT_XOR:
- __ xor_(eax, ecx);
- break;
- default:
- UNREACHABLE();
- }
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
- __ pop(edx);
- BinaryOpStub stub(op, mode);
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
- break;
- }
- case NAMED_PROPERTY: {
- __ push(eax); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- __ mov(edx, eax);
- __ pop(eax); // Restore value.
- __ mov(ecx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- case KEYED_PROPERTY: {
- __ push(eax); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(ecx, eax);
- __ pop(edx); // Receiver.
- __ pop(eax); // Restore value.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- if (var->IsUnallocated()) {
- // Global var, const, or let.
- __ mov(ecx, var->name());
- __ mov(edx, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
- } else if (op == Token::INIT_CONST) {
- // Const initializers need a write barrier.
- ASSERT(!var->IsParameter()); // No const parameters.
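- // The hole check below means only the first initializer that actually
- // runs writes the slot; e.g. after 'const x = 1; const x = 2;' the
- // second initializer finds the slot already initialized and is a no-op.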
- if (var->IsStackLocal()) {
- Label skip;
- __ mov(edx, StackOperand(var));
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip);
- __ mov(StackOperand(var), eax);
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(eax);
- __ push(esi);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
- // Non-initializing assignment to let variable needs a write barrier.
- if (var->IsLookupSlot()) {
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, ecx);
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &assign, Label::kNear);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&assign);
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
- }
-
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
- // Assignment to var or initializing assignment to let/const
- // in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
- MemOperand location = VarOperand(var, ecx);
- if (generate_debug_code_ && op == Token::INIT_LET) {
- // Check for an uninitialized let binding.
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ Check(equal, "Let binding re-initialization.");
- }
- // Perform the assignment.
- __ mov(location, eax);
- if (var->IsContextSlot()) {
- __ mov(edx, eax);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- }
- // Non-initializing assignments to consts are ignored.
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- // eax : value
- // esp[0] : receiver
-
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ mov(ecx, prop->key()->AsLiteral()->handle());
- __ pop(edx); // Receiver.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
- // eax : value
- // esp[0] : key
- // esp[kPointerSize] : receiver
-
- __ pop(ecx); // Key.
- __ pop(edx); // Receiver.
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- __ mov(edx, result_register());
- EmitNamedPropertyLoad(expr);
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(eax);
- } else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ pop(edx); // Object.
- __ mov(ecx, result_register()); // Key.
- EmitKeyedPropertyLoad(expr);
- context()->Plug(eax);
- }
-}
-
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- __ call(code, rmode, ast_id);
-}
-
-
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- __ Set(ecx, Immediate(name));
- }
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(ecx);
- __ push(eax);
- __ push(ecx);
-
- // Load the arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position of the IC call.
- SetSourcePosition(expr->position());
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax); // Drop the key still on the stack.
-}
-
-
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
-
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
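- // CallFunctionStub reads the type feedback cell from ebx.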
- __ mov(ebx, cell);
-
- CallFunctionStub stub(arg_count, flags);
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
-
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ push(Operand(esp, arg_count * kPointerSize));
- } else {
- __ push(Immediate(isolate()->factory()->undefined_value()));
- }
-
- // Push the receiver of the enclosing function.
- __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
- // Push the language mode.
- __ push(Immediate(Smi::FromInt(language_mode())));
-
- // Push the start position of the scope the call resides in.
- __ push(Immediate(Smi::FromInt(scope()->start_position())));
-
- // Push the QML mode flag.
- __ push(Immediate(Smi::FromInt(is_qml_mode())));
-
- // Do the runtime call. Together with the function copy pushed by the
- // caller, the five values pushed above make six arguments.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
-
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- // Reserved receiver slot.
- __ push(Immediate(isolate()->factory()->undefined_value()));
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
-
- // The runtime call returns a pair of values in eax (function) and
- // edx (receiver). Touch up the stack with the right values.
- __ mov(Operand(esp, (arg_count + 0) * kPointerSize), edx);
- __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, eax);
-
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Push global object as receiver for the call IC.
- __ push(proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
-
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- // Call to a lookup slot (dynamically introduced variable).
- Label slow, done;
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
- }
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in eax) and
- // the object holding it (returned in edx).
- __ push(context_register());
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(eax); // Function.
- __ push(edx); // Receiver.
-
- // If fast case code has been generated, emit code to push the function
- // and receiver and have the slow path jump around this code.
- if (done.is_linked()) {
- Label call;
- __ jmp(&call, Label::kNear);
- __ bind(&done);
- // Push function.
- __ push(eax);
- // The receiver is implicitly the global receiver. Indicate this by
- // passing the hole to the call function stub.
- __ push(Immediate(isolate()->factory()->the_hole_value()));
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found by
- // LoadContextSlot. That object could be the hole if the receiver is
- // implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
-
- } else if (property != NULL) {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
- RelocInfo::CODE_TARGET);
- } else {
- EmitKeyedCallWithIC(expr, property->key());
- }
-
- } else {
- // Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- }
- // Load global receiver object.
- __ mov(ebx, GlobalObjectOperand());
- __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
- // Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function and argument count into edi and eax.
- __ Set(eax, Immediate(arg_count));
- __ mov(edi, Operand(esp, arg_count * kPointerSize));
-
- // Record call targets in unoptimized code.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(ebx, cell);
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, Immediate(kSmiTagMask));
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
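- // A non-negative smi has both the smi tag bit and the sign bit clear.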
- __ test(eax, Immediate(kSmiTagMask | 0x80000000));
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, if_true);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ movzx_b(ecx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ecx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
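- // The instance type must be within the non-callable spec-object range.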
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(below, if_false);
- __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(below_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
- // TODO(rossberg): incorporate symbols.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(above_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ebx, Immediate(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(not_zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(eax);
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(not_zero, if_true);
-
- // Check for fast case object. Return false for slow case objects.
- __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ cmp(ecx, FACTORY->hash_table_map());
- __ j(equal, if_false);
-
- // Look for the valueOf string in the descriptor array, and indicate false
- // if found. Since we omit an enumeration index check, a valueOf property
- // added via a transition that shares this descriptor array yields a false
- // positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(ecx, ebx);
- __ cmp(ecx, 0);
- __ j(equal, &done);
-
- __ LoadInstanceDescriptors(ebx, ebx);
- // ebx: descriptor array.
- // ecx: valid entries in the descriptor array.
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ imul(ecx, ecx, DescriptorArray::kDescriptorSize);
- __ lea(ecx, Operand(ebx, ecx, times_2, DescriptorArray::kFirstOffset));
- // Calculate location of the first key name.
- __ add(ebx, Immediate(DescriptorArray::kFirstOffset));
- // Loop through all the keys in the descriptor array. If one of these is the
- // internalized string "valueOf", the result is false.
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, FieldOperand(ebx, 0));
- __ cmp(edx, FACTORY->value_of_string());
- __ j(equal, if_false);
- __ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ cmp(ebx, ecx);
- __ j(not_equal, &loop);
-
- __ bind(&done);
-
- // Reload map as register ebx was used as temporary above.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-
- // If a valueOf property is not found on the object, check that its
- // prototype is the unmodified String prototype. If not, the result is false.
- __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ JumpIfSmi(ecx, if_false);
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edx,
- FieldOperand(edx, GlobalObject::kNativeContextOffset));
- __ cmp(ecx,
- ContextOperand(edx,
- Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(ebx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, SYMBOL_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker);
- __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(ebx);
- __ cmp(eax, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in eax.
- VisitForAccumulatorValue(args->at(0));
- __ mov(edx, eax);
- __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label exit;
- // Get the number of formal parameters.
- __ Set(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- __ AssertSmi(eax);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ JumpIfSmi(eax, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
- // Map is now in eax.
- __ j(below, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ j(equal, &function);
-
- __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ j(equal, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
- __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &non_function_constructor);
-
- // eax now contains the constructor function. Grab the
- // instance class name from there.
- __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done);
-
- // Functions have class 'Function'.
- __ bind(&function);
- __ mov(eax, isolate()->factory()->function_class_string());
- __ jmp(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ bind(&non_function_constructor);
- __ mov(eax, isolate()->factory()->Object_string());
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ mov(eax, isolate()->factory()->null_value());
-
- // All done.
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
- // Finally, we're expected to leave a value on the top of the stack.
- __ mov(eax, isolate()->factory()->undefined_value());
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ mov(edi, eax);
-
- __ bind(&heapnumber_allocated);
-
- __ PrepareCallCFunction(1, ebx);
- __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // Convert 32 random bits in eax to 0.(32 random bits) in a double
- // by computing:
- // ((1.(20 0s)(32 random bits)) x 2^20) - (1.0 x 2^20).
- // This is implemented on both SSE2 and FPU.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope fscope(SSE2);
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, ebx);
- __ movd(xmm0, eax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorps(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
- } else {
- // 0x4130000000000000 is 1.0 x 2^20 as a double.
- __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
- Immediate(0x41300000));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
- __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
- __ fsubp(1);
- __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
- }
- __ mov(eax, edi);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(eax, &done, Label::kNear);
- // If the object is not a value type, return the object.
- __ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
- __ j(not_equal, &done, Label::kNear);
- __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label runtime, done, not_date_object;
- Register object = eax;
- Register result = eax;
- Register scratch = ecx;
-
- __ JumpIfSmi(object, &not_date_object);
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ j(not_equal, &not_date_object);
-
- if (index->value() == 0) {
- __ mov(result, FieldOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
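- // Cached date fields are only valid while the date cache stamp matches.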
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand::StaticVariable(stamp));
- __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ mov(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(Operand(esp, 0), object);
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ jmp(&done);
- }
-
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(ecx); // value
- __ pop(ebx); // index
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(ecx); // value
- __ pop(ebx); // index
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
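- // The stub is only used when SSE2 is available; otherwise call the runtime.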
- if (CpuFeatures::IsSupported(SSE2)) {
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(ebx); // eax = value. ebx = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(ebx, &done, Label::kNear);
-
- // If the object is not a value type, return the value.
- __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
- __ j(not_equal, &done, Label::kNear);
-
- // Store the value.
- __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
-
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mov(edx, eax);
- __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
-
- NumberToStringStub stub;
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
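- // The generator takes the character code in eax and leaves the result
- // string in ebx.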
- StringCharFromCodeGenerator generator(eax, ebx);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(ebx);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = ebx;
- Register index = eax;
- Register result = edx;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ Set(result, Immediate(isolate()->factory()->nan_value()));
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ Set(result, Immediate(isolate()->factory()->undefined_value()));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = ebx;
- Register index = eax;
- Register scratch = edx;
- Register result = eax;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ Set(result, Immediate(isolate()->factory()->empty_string()));
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Set(result, Immediate(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() >= 2);
-
- int arg_count = args->length() - 2; // Exclude the receiver and the function.
- for (int i = 0; i < arg_count + 1; ++i) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &runtime);
-
- // InvokeFunction requires the function in edi. Move it in there.
- __ mov(edi, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(edi, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(eax);
- __ CallRuntime(Runtime::kCall, args->length());
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpConstructResultStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- __ mov(eax, isolate()->factory()->undefined_value());
- context()->Plug(eax);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = eax;
- Register cache = ebx;
- Register tmp = ecx;
- __ mov(cache, ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX));
- __ mov(cache,
- FieldOperand(cache, GlobalObject::kNativeContextOffset));
- __ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ mov(cache,
- FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done, not_found;
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- // Load the cache finger offset, a smi, into tmp.
- __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
- __ cmp(key, CodeGenerator::FixedArrayElementOperand(cache, tmp));
- __ j(not_equal, &not_found);
-
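- // Cache hit: the value is stored right after its key.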
- __ mov(eax, CodeGenerator::FixedArrayElementOperand(cache, tmp, 1));
- __ jmp(&done);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = eax;
- Register left = ebx;
- Register tmp = ecx;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmp(left, right);
- __ j(equal, &ok);
- // Fail if either is a non-HeapObject.
- __ mov(tmp, left);
- __ and_(tmp, right);
- __ JumpIfSmi(tmp, &fail);
- __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
- __ j(not_equal, &fail);
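- // The two regexps must have the same map and share the same data array.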
- __ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail);
- __ mov(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok);
- __ bind(&fail);
- __ mov(eax, Immediate(isolate()->factory()->false_value()));
- __ jmp(&done);
- __ bind(&ok);
- __ mov(eax, Immediate(isolate()->factory()->true_value()));
- __ bind(&done);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(eax);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
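- // The relevant hash field bits are zero exactly when the string contains
- // a cached array index.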
- __ test(FieldOperand(eax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(eax);
-
- __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
- __ IndexFromHash(eax, eax);
-
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- // We will leave the separator on the stack until the end of the function.
- VisitForStackValue(args->at(1));
- // Load the array into eax.
- VisitForAccumulatorValue(args->at(0));
- // All aliases of the same register have disjoint lifetimes.
- Register array = eax;
- Register elements = no_reg; // Will be eax.
-
- Register index = edx;
-
- Register string_length = ecx;
-
- Register string = esi;
-
- Register scratch = ebx;
-
- Register array_length = edi;
- Register result_pos = no_reg; // Will be edi.
-
- // Separator operand is already pushed.
- Operand separator_operand = Operand(esp, 2 * kPointerSize);
- Operand result_operand = Operand(esp, 1 * kPointerSize);
- Operand array_length_operand = Operand(esp, 0);
- __ sub(esp, Immediate(2 * kPointerSize));
- __ cld();
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch, &bailout);
-
- // If the array has length zero, return the empty string.
- __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length);
- __ j(not_zero, &non_trivial_array);
- __ mov(result_operand, isolate()->factory()->empty_string());
- __ jmp(&done);
-
- // Save the array length.
- __ bind(&non_trivial_array);
- __ mov(array_length_operand, array_length);
-
- // Save the FixedArray containing array's elements.
- // End of array's live range.
- elements = array;
- __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
- array = no_reg;
-
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, Immediate(0));
- __ Set(string_length, Immediate(0));
- // Loop condition: while (index < length).
- // Live loop registers: index, array_length, string,
- // scratch, string_length, elements.
- if (generate_debug_code_) {
- __ cmp(index, array_length);
- __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
- }
- __ bind(&loop);
- __ mov(string, FieldOperand(elements,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(string, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
- __ add(string_length,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
- __ j(overflow, &bailout);
- __ add(index, Immediate(1));
- __ cmp(index, array_length);
- __ j(less, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, 1);
- __ j(not_equal, &not_size_one_array);
- __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
- __ mov(result_operand, scratch);
- __ jmp(&done);
-
- __ bind(&not_size_one_array);
-
- // End of array_length live range.
- result_pos = array_length;
- array_length = no_reg;
-
- // Live registers:
- // string_length: Sum of string lengths, as a smi.
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat ASCII string.
- __ mov(string, separator_operand);
- __ JumpIfSmi(string, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, ASCII_STRING_TYPE);
- __ j(not_equal, &bailout);
-
- // Add (separator length times array_length) - separator length
- // to string_length.
- __ mov(scratch, separator_operand);
- __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
- __ sub(string_length, scratch); // May be negative, temporarily.
- __ imul(scratch, array_length_operand);
- __ j(overflow, &bailout);
- __ add(string_length, scratch);
- __ j(overflow, &bailout);
-
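- // Untag the smi to get the total result length in characters.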
- __ shr(string_length, 1);
- // Live registers and stack values:
- // string_length
- // elements
- __ AllocateAsciiString(result_pos, string_length, scratch,
- index, string, &bailout);
- __ mov(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
-
-
- __ mov(string, separator_operand);
- __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ j(equal, &one_char_separator);
- __ j(greater, &long_separator);
-
-
- // Empty separator case
- __ mov(index, Immediate(0));
- __ jmp(&loop_1_condition);
- // Loop condition: while (index < length).
- __ bind(&loop_1);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // elements: the FixedArray of strings we are joining.
-
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
- __ bind(&loop_1_condition);
- __ cmp(index, array_length_operand);
- __ j(less, &loop_1); // End while (index < length).
- __ jmp(&done);
-
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its ASCII character value.
- __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ mov_b(separator_operand, scratch);
-
- __ Set(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator.
- __ jmp(&loop_2_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_2);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator character to the result.
- __ mov_b(scratch, separator_operand);
- __ mov_b(Operand(result_pos, 0), scratch);
- __ inc(result_pos);
-
- __ bind(&loop_2_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_2); // End while (index < length).
- __ jmp(&done);
-
-
- // Long separator case (separator is more than one character).
- __ bind(&long_separator);
-
- __ Set(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator.
- __ jmp(&loop_3_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_3);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator to the result.
- __ mov(string, separator_operand);
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
-
- __ bind(&loop_3_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_3); // End while (index < length).
- __ jmp(&done);
-
-
- __ bind(&bailout);
- __ mov(result_operand, isolate()->factory()->undefined_value());
- __ bind(&done);
- __ mov(eax, result_operand);
- // Drop temp values from the stack, and restore context register.
- __ add(esp, Immediate(3 * kPointerSize));
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ mov(eax, GlobalObjectOperand());
- __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function via a call IC.
- __ Set(ecx, Immediate(expr->name()));
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- } else {
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- }
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* property = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
-
- if (property != NULL) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ push(Immediate(Smi::FromInt(strict_mode_flag)));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(eax);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
- if (var->IsUnallocated()) {
- __ push(var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(kNonStrictMode)));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(eax);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global variables is false. 'this' is
- // not really a variable, though we implement it as one. The
- // subexpression does not have side effects.
- context()->Plug(var->is_this());
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(eax);
- }
- } else {
-        // Result of deleting a non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
-
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(isolate()->factory()->undefined_value());
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else if (context()->IsTest()) {
- const TestContext* test = TestContext::cast(context());
- // The labels are swapped for the recursive call.
- VisitForControl(expr->expression(),
- test->false_label(),
- test->true_label(),
- test->fall_through());
- context()->Plug(test->true_label(), test->false_label());
- } else {
- // We handle value contexts explicitly rather than simply visiting
- // for control and plugging the control flow into the context,
- // because we need to prepare a pair of extra administrative AST ids
- // for the optimizing compiler.
- ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
- Label materialize_true, materialize_false, done;
- VisitForControl(expr->expression(),
- &materialize_false,
- &materialize_true,
- &materialize_true);
- __ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
- if (context()->IsAccumulatorValue()) {
- __ mov(eax, isolate()->factory()->true_value());
- } else {
- __ Push(isolate()->factory()->true_value());
- }
- __ jmp(&done, Label::kNear);
- __ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
- if (context()->IsAccumulatorValue()) {
- __ mov(eax, isolate()->factory()->false_value());
- } else {
- __ Push(isolate()->factory()->false_value());
- }
- __ bind(&done);
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- { StackValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ CallRuntime(Runtime::kTypeof, 1);
- context()->Plug(eax);
- break;
- }
-
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ JumpIfSmi(result_register(), &no_conversion);
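-      // (Smis are already numbers, so they jump past the ToNumberStub call
-      // below; unary '+' performs only ToNumber.)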
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
- // UnaryOpStub expects the argument to be in the
- // accumulator register eax.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
-
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ push(Immediate(Smi::FromInt(0)));
- }
- if (assign_type == NAMED_PROPERTY) {
- // Put the object both on the stack and in edx.
- VisitForAccumulatorValue(prop->obj());
- __ push(eax);
- __ mov(edx, eax);
- EmitNamedPropertyLoad(prop);
- } else {
- VisitForStackValue(prop->obj());
- VisitForStackValue(prop->key());
- __ mov(edx, Operand(esp, kPointerSize)); // Object.
- __ mov(ecx, Operand(esp, 0)); // Key.
- EmitKeyedPropertyLoad(prop);
- }
- }
-
- // We need a second deoptimization point after loading the value
-  // in case evaluating the property load may have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
- }
-
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
- if (ShouldInlineSmiCase(expr->op())) {
- __ JumpIfSmi(eax, &no_conversion, Label::kNear);
- }
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(eax);
- break;
- case NAMED_PROPERTY:
- __ mov(Operand(esp, kPointerSize), eax);
- break;
- case KEYED_PROPERTY:
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- break;
- }
- }
- }
-
- // Inline smi case if we are in a loop.
- Label done, stub_call;
- JumpPatchSite patch_site(masm_);
-
- if (ShouldInlineSmiCase(expr->op())) {
- if (expr->op() == Token::INC) {
- __ add(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- }
- __ j(overflow, &stub_call, Label::kNear);
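-    // (A smi n is encoded as n << 1, so Smi::FromInt(1) is the raw value 2;
-    // a signed 32-bit overflow here coincides exactly with leaving the
-    // 31-bit smi payload range, which is what routes us to the stub.)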
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(eax, &done, Label::kNear);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ sub(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ add(eax, Immediate(Smi::FromInt(1)));
- }
- }
-
- // Record position before stub call.
- SetSourcePosition(expr->position());
-
- // Call stub for +1/-1.
- __ mov(edx, eax);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
-
- // Store the value returned in eax.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(eax);
- }
-        // For all contexts except EffectContext we have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- // Perform the assignment as if via '='.
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
- }
- break;
- case NAMED_PROPERTY: {
- __ mov(ecx, prop->key()->AsLiteral()->handle());
- __ pop(edx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(eax);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ pop(ecx);
- __ pop(edx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- // Result is on the stack
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(eax);
- }
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- VariableProxy* proxy = expr->AsVariableProxy();
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
-
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
- __ mov(edx, proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ mov(ecx, Immediate(proxy->name()));
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallIC(ic);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(eax);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ push(esi);
- __ push(Immediate(proxy->name()));
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(eax);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Expression* sub_expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(sub_expr);
- }
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
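-  // (Each branch below specializes one literal comparison such as
-  // typeof x === "number"; note that undetectable objects must report
-  // "undefined", which several of the checks account for.)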
- if (check->Equals(isolate()->heap()->number_string())) {
- __ JumpIfSmi(eax, if_true);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
- __ j(above_equal, if_false);
- // Check for undetectable objects => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
- __ cmp(eax, isolate()->factory()->true_value());
- __ j(equal, if_true);
- __ cmp(eax, isolate()->factory()->false_value());
- Split(equal, if_true, if_false, fall_through);
- } else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
- __ cmp(eax, isolate()->factory()->null_value());
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, if_true);
- __ JumpIfSmi(eax, if_false);
- // Check for undetectable objects => true.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(ecx, Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
- __ JumpIfSmi(eax, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
- __ j(equal, if_true);
- __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
- __ JumpIfSmi(eax, if_false);
- if (!FLAG_harmony_typeof) {
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, if_true);
- }
- if (FLAG_harmony_symbols) {
- __ CmpObjectType(eax, SYMBOL_TYPE, edx);
- __ j(equal, if_true);
- }
- __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
- __ j(below, if_false);
- __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, if_false);
- // Check for undetectable objects => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- Split(zero, if_true, if_false, fall_through);
- } else {
- if (if_false != fall_through) __ jmp(if_false);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
-
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Token::Value op = expr->op();
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ cmp(eax, isolate()->factory()->true_value());
- Split(equal, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, eax);
- // The stub returns 0 for true.
- Split(zero, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cc = CompareIC::ComputeCondition(op);
- __ pop(edx);
-
- bool inline_smi_code = ShouldInlineSmiCase(op);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
- __ cmp(edx, eax);
- Split(cc, if_true, if_false, NULL);
- __ bind(&slow_case);
- }
-
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
- patch_site.EmitPatchInfo();
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, eax);
- Split(cc, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Handle<Object> nil_value = nil == kNullValue ?
- isolate()->factory()->null_value() :
- isolate()->factory()->undefined_value();
- __ cmp(eax, nil_value);
- if (expr->op() == Token::EQ_STRICT) {
- Split(equal, if_true, if_false, fall_through);
- } else {
- Handle<Object> other_nil_value = nil == kNullValue ?
- isolate()->factory()->undefined_value() :
- isolate()->factory()->null_value();
- __ j(equal, if_true);
- __ cmp(eax, other_nil_value);
- __ j(equal, if_true);
- __ JumpIfSmi(eax, if_false);
- // It can be an undetectable object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(edx, Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(eax);
-}
-
-
-Register FullCodeGenerator::result_register() {
- return eax;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return esi;
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ mov(Operand(ebp, frame_offset), value);
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ mov(dst, ContextOperand(esi, context_index));
-}
-
-
-void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_global_scope() ||
- declaration_scope->is_module_scope()) {
- // Contexts nested in the native context have a canonical empty function
- // as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ push(Immediate(Smi::FromInt(0)));
- } else if (declaration_scope->is_eval_scope()) {
- // Contexts nested inside eval code have the same closure as the context
- // calling eval, not the anonymous closure containing the eval code.
- // Fetch it from the context.
- __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
- } else {
- ASSERT(declaration_scope->is_function_scope());
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
-  // Cook the return address on top of the stack (smi-encoded Code* delta).
- ASSERT(!result_register().is(edx));
- __ pop(edx);
- __ sub(edx, Immediate(masm_->CodeObject()));
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTag(edx);
- __ push(edx);
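-  // (Keeping the return address as a smi-tagged offset from the code object
-  // rather than as a raw pointer keeps it valid if the GC moves the code.)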
-
- // Store result register while executing finally block.
- __ push(result_register());
-
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(edx, Operand::StaticVariable(pending_message_obj));
- __ push(edx);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(edx, Operand::StaticVariable(has_pending_message));
- __ SmiTag(edx);
- __ push(edx);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(edx, Operand::StaticVariable(pending_message_script));
- __ push(edx);
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(edx));
- // Restore pending message from stack.
- __ pop(edx);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(Operand::StaticVariable(pending_message_script), edx);
-
- __ pop(edx);
- __ SmiUntag(edx);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(Operand::StaticVariable(has_pending_message), edx);
-
- __ pop(edx);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(Operand::StaticVariable(pending_message_obj), edx);
-
- // Restore result register from stack.
- __ pop(result_register());
-
- // Uncook return address.
- __ pop(edx);
- __ SmiUntag(edx);
- __ add(edx, Immediate(masm_->CodeObject()));
- __ jmp(edx);
-}
-
-
-#undef __
-
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ mov(esi, Operand(esp, StackHandlerConstants::kContextOffset));
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
- }
- __ PopTryHandler();
- __ call(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/ic-ia32.cc b/src/3rdparty/v8/src/ia32/ic-ia32.cc
deleted file mode 100644
index 428d830..0000000
--- a/src/3rdparty/v8/src/ia32/ic-ia32.cc
+++ /dev/null
@@ -1,1675 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, global_object);
- __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
- __ j(equal, global_object);
- __ cmp(type, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register r0,
- Register r1,
- Label* miss) {
- // Register usage:
- // receiver: holds the receiver on entry and is unchanged.
- // r0: used to hold receiver instance type.
- // Holds the property dictionary on fall through.
-  // r1: used to hold the receiver's map.
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the receiver is a valid JS object.
- __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
- __ cmp(r0, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, miss);
-
- // If this assert fails, we have to check upper bound too.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
- GenerateGlobalInstanceTypeCheck(masm, r0, miss);
-
- // Check for non-global object that requires access check.
- __ test_b(FieldOperand(r1, Map::kBitFieldOffset),
- (1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasNamedInterceptor));
- __ j(not_zero, miss);
-
- __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CheckMap(r0, FACTORY->hash_table_map(), miss, DONT_DO_SMI_CHECK);
-}
-
-
-// Helper function used to load a property from a dictionary backing
-// storage. This function may fail to load a property even though it is
-// in the dictionary, so code at miss_label must always call a backup
-// property load that is complete. This function is safe to call if
-// name is not internalized, and will jump to the miss_label in that
-// case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - used for the index into the property dictionary
- //
- // r1 - used to hold the capacity of the property dictionary.
- //
- // result - holds the result on exit.
-
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property.
- __ bind(&done);
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
- Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
- __ j(not_zero, miss_label);
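-  // (A zero type field in the details word denotes a NORMAL property; that
-  // is the only kind this fast path can read directly from the dictionary.)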
-
- // Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not internalized, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register value,
- Register r0,
- Register r1) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is clobbered.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // value - holds the value to store and is unchanged.
- //
- // r0 - used for index into the property dictionary and is clobbered.
- //
- // r1 - used to hold the capacity of the property dictionary and is clobbered.
- Label done;
-
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property that is not read only.
- __ bind(&done);
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
- Immediate(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label);
-
- // Store the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
- __ mov(Operand(r0, 0), value);
-
- // Update write barrier. Make sure not to clobber the value.
- __ mov(r1, value);
- __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS objects.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- int interceptor_bit,
- Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Get the map of the receiver.
- __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check bit field.
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
- __ j(not_zero, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing
- // into string objects works as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-
- __ CmpInstanceType(map, JS_OBJECT_TYPE);
- __ j(below, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, the elements map check is not performed.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register scratch,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key and is unchanged (must be a smi).
- // Scratch registers:
- // scratch - used to hold elements of the receiver and the loaded value.
- // result - holds the result on exit if the load succeeds and
- // we fall through.
-
- __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ CheckMap(scratch,
- FACTORY->fixed_array_map(),
- not_fast_array,
- DONT_DO_SMI_CHECK);
- } else {
- __ AssertFastElements(scratch);
- }
- // Check that the key (index) is within bounds.
- __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
- __ j(above_equal, out_of_range);
- // Fast case: Do the load.
- STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
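-  // (The key is a smi, i.e. already index << 1, so scaling it by 2 yields
-  // index * kPointerSize; the assert above guards these assumptions.)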
- __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(scratch, Immediate(FACTORY->the_hole_value()));
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, out_of_range);
- if (!result.is(scratch)) {
- __ mov(result, scratch);
- }
-}
-
-
-// Checks whether a key is an array index string or an internalized string.
-// Falls through if the key is an internalized string.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_internalized) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
- __ j(above_equal, not_internalized);
-
- // Is the string an array index, with cached numeric value?
- __ mov(hash, FieldOperand(key, String::kHashFieldOffset));
- __ test(hash, Immediate(String::kContainsCachedArrayIndexMask));
- __ j(zero, index_string);
-
- // Is the string internalized?
- STATIC_ASSERT(kInternalizedTag != 0);
- __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsInternalizedMask);
- __ j(zero, not_internalized);
-}
-
-
-static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
- Factory* factory = masm->isolate()->factory();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
- __ j(below, slow_case);
-
- // Check that the key is a positive smi.
- __ test(key, Immediate(0x80000001));
- __ j(not_zero, slow_case);
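-  // (The mask 0x80000001 covers the smi tag bit and the sign bit, so only
-  // non-negative smis fall through here.)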
-
- // Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(scratch2, Immediate(Smi::FromInt(2)));
- __ cmp(key, scratch2);
- __ j(above_equal, unmapped_case);
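-  // (The first two slots of the parameter map hold the context and the
-  // arguments backing store, hence the length - 2 mapped entries; the
-  // header offset below skips those same two slots.)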
-
- // Load element index and check whether it is the hole.
- const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
- __ mov(scratch2, FieldOperand(scratch1,
- key,
- times_half_pointer_size,
- kHeaderSize));
- __ cmp(scratch2, factory->the_hole_value());
- __ j(equal, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- const int kContextOffset = FixedArray::kHeaderSize;
- __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
- return FieldOperand(scratch1,
- scratch2,
- times_half_pointer_size,
- Context::kHeaderSize);
-}
-
-
-static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, scratch);
- __ j(greater_equal, slow_case);
- return FieldOperand(backing_store,
- key,
- times_half_pointer_size,
- FixedArray::kHeaderSize);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(ecx, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(eax, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, edx, ecx, eax, eax, NULL, &slow);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // edx: receiver
- // ebx: untagged index
- // ecx: key
- // eax: elements
- __ CheckMap(eax,
- isolate->factory()->hash_table_map(),
- &slow,
- DONT_DO_SMI_CHECK);
- Label slow_pop_receiver;
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(edx);
- __ LoadFromNumberDictionary(&slow_pop_receiver, eax, ecx, ebx, edx, edi, eax);
- // Pop receiver before returning.
- __ pop(edx);
- __ ret(0);
-
- __ bind(&slow_pop_receiver);
- // Pop the receiver from the stack and jump to runtime.
- __ pop(edx);
-
- __ bind(&slow);
- // Slow case: jump to runtime.
- // edx: receiver
- // ecx: key
- __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary.
- __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(isolate->factory()->hash_table_map()));
- __ j(equal, &probe_dictionary);
-
-  // The receiver's map is still in eax; compute the keyed lookup cache hash
-  // based on 32 bits of the map pointer and the string hash.
- if (FLAG_debug_code) {
- __ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
- __ Check(equal, "Map is no longer in eax.");
- }
- __ mov(ebx, eax); // Keep the map around for later.
- __ shr(eax, KeyedLookupCache::kMapHashShift);
- __ mov(edi, FieldOperand(ecx, String::kHashFieldOffset));
- __ shr(edi, String::kHashShift);
- __ xor_(eax, edi);
- __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
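-  // (Bucket index = ((map >> kMapHashShift) ^ (hash_field >> kHashShift)),
-  // masked to the cache capacity, mirroring the hash the lookup cache
-  // itself uses when entries are inserted.)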
-
- // Load the key (consisting of map and internalized string) from the cache and
-  // check for a match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys =
- ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- __ mov(edi, eax);
- __ shl(edi, kPointerSizeLog2 + 1);
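-    // (Each cache entry is a (map, key) pair, i.e. two words, so the index
-    // is scaled by 2 * kPointerSize.)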
- if (i != 0) {
- __ add(edi, Immediate(kPointerSize * i * 2));
- }
- __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &try_next_entry);
- __ add(edi, Immediate(kPointerSize));
- __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(equal, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- __ lea(edi, Operand(eax, 1));
- __ shl(edi, kPointerSizeLog2 + 1);
- __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
- __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
- __ add(edi, Immediate(kPointerSize));
- __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
- __ j(not_equal, &slow);
-
- // Get field offset.
- // edx : receiver
- // ebx : receiver's map
- // ecx : key
- // eax : lookup cache index
- ExternalReference cache_field_offsets =
- ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- if (i != 0) {
- __ add(eax, Immediate(i));
- }
- __ mov(edi,
- Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
- __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ sub(edi, eax);
- __ j(above_equal, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ add(eax, edi);
- __ mov(eax, FieldOperand(edx, eax, times_pointer_size, 0));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Load property array property.
- __ bind(&property_array_property);
- __ mov(eax, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ mov(eax, FieldOperand(eax, edi, times_pointer_size,
- FixedArray::kHeaderSize));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
-
- __ mov(eax, FieldOperand(edx, JSObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
-
- GenerateDictionaryLoad(masm, &slow, ebx, ecx, eax, edi, eax);
- __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_string);
- __ IndexFromHash(ebx, ecx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key (index)
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Register receiver = edx;
- Register index = ecx;
- Register scratch = ebx;
- Register result = eax;
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ ret(0);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &slow);
-
-  // Check that the key is an array index, that is, Uint32.
- __ test(ecx, Immediate(kSmiTagMask | kSmiSignMask));
- __ j(not_zero, &slow);
-
- // Get the map of the receiver.
- __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset));
- __ and_(eax, Immediate(kSlowCaseBitFieldMask));
- __ cmp(eax, Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow);
-
- // Everything is fine, call runtime.
- __ pop(eax);
- __ push(edx); // receiver
- __ push(ecx); // key
- __ push(eax); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
- masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, notin;
- Factory* factory = masm->isolate()->factory();
- Operand mapped_location =
- GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
- __ mov(eax, mapped_location);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
- __ cmp(unmapped_location, factory->the_hole_value());
- __ j(equal, &slow);
- __ mov(eax, unmapped_location);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, notin;
- Operand mapped_location =
- GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, &notin, &slow);
- __ mov(mapped_location, eax);
- __ lea(ecx, mapped_location);
- __ mov(edx, eax);
- __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow);
- __ mov(unmapped_location, eax);
- __ lea(edi, unmapped_location);
- __ mov(edx, eax);
- __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
- // eax: value
- // ecx: key (a smi)
- // edx: receiver
- // ebx: FixedArray receiver->elements
- // edi: receiver map
-  // Fast case: Do the store; the value could be either an Object or a double.
- __ bind(fast_object);
- if (check_map == kCheckMap) {
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, fast_double);
- }
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(eax, &non_smi_value);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
-  // It's irrelevant whether the array is smi-only or not when writing a smi.
- __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
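-  // (No write barrier is needed for a smi store: a smi is an immediate
-  // value, not a heap pointer, so the GC never needs to trace it.)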
- __ ret(0);
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(edi, &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
- // Update write barrier for the elements array address.
- __ mov(edx, eax); // Preserve the value which is returned.
- __ RecordWriteArray(
- ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ ret(0);
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, slow);
- // If the value is a number, store it as a double in the FastDoubleElements
- // array.
- }
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(eax, ebx, ecx, edi, xmm0,
- &transition_double_elements, false);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ ret(0);
-
- __ bind(&transition_smi_elements);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-
- // Transition the array appropriately depending on the value type.
- __ CheckMap(eax,
- masm->isolate()->factory()->heap_number_map(),
- &non_double_value,
- DONT_DO_SMI_CHECK);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
- // and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- ebx,
- edi,
- slow);
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- slow);
- mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
- slow);
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
-  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- slow);
- mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
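-// (Summary of the transitions above: FAST_SMI_ELEMENTS becomes
-// FAST_DOUBLE_ELEMENTS when a heap number is stored, and FAST_SMI_ELEMENTS or
-// FAST_DOUBLE_ELEMENTS become FAST_ELEMENTS when any other heap object is
-// stored; each case installs the transitioned map and migrates the elements
-// backing store.)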
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(edx, &slow);
- // Get the map from the receiver.
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
- __ j(not_zero, &slow);
- // Check that the key is a smi.
- __ JumpIfNotSmi(ecx, &slow);
- __ CmpInstanceType(edi, JS_ARRAY_TYPE);
- __ j(equal, &array);
- // Check that the object is some kind of JSObject.
- __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow);
-
- // Object case: Check key against length in the elements array.
- // eax: value
- // edx: JSObject
- // ecx: key (a smi)
- // edi: receiver map
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ j(below, &fast_object);
-
- // Slow case: call runtime.
- __ bind(&slow);
- GenerateRuntimeSetProperty(masm, strict_mode);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // eax: value
- // edx: receiver, a JSArray
- // ecx: key, a smi.
- // ebx: receiver->elements, a FixedArray
- // edi: receiver map
-  // flags: result of compare (ecx, edx.length()), still set from above
-  // Do not leave holes in the array: the key must equal the current length.
- __ j(not_equal, &slow);
- __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
- __ bind(&array);
- // eax: value
- // edx: receiver, a JSArray
- // ecx: key, a smi.
- // edi: receiver map
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-
- // Check the key against the length in the array and fall through to the
- // common store code.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &extra);
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength);
-}
-
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- Isolate* isolate = masm->isolate();
- isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, eax);
-
- // If the stub cache probing failed, the receiver might be a value.
-  // For value objects, we use the map of the prototype object of the
-  // corresponding JSValue for the cache, and that is what we need
-  // to probe.
- //
- // Check for number.
- __ JumpIfSmi(edx, &number);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
- __ j(not_equal, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, edx);
- __ jmp(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ CmpInstanceType(ebx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, edx);
- __ jmp(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ cmp(edx, isolate->factory()->true_value());
- __ j(equal, &boolean);
- __ cmp(edx, isolate->factory()->false_value());
- __ j(not_equal, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, edx);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
- __ bind(&miss);
-}
-
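The probe above handles primitive receivers by substituting the prototype of the matching wrapper constructor before the second stub-cache lookup. A hedged sketch of just that classification step, in plain C++ with hypothetical names rather than V8 types:

#include <cstdio>

enum ReceiverKind { kSmiOrHeapNumber, kString, kBoolean, kOther };

// For value receivers the IC probes with the map of the corresponding
// wrapper constructor's prototype; anything else falls through as a miss.
const char* ProbePrototype(ReceiverKind kind) {
  switch (kind) {
    case kSmiOrHeapNumber: return "Number.prototype";
    case kString:          return "String.prototype";
    case kBoolean:         return "Boolean.prototype";
    default:               return nullptr;  // miss: fall through
  }
}

int main() {
  std::printf("%s\n", ProbePrototype(kSmiOrHeapNumber));
  return 0;
}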
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edi : function
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Check that the result is not a smi.
- __ JumpIfSmi(edi, miss);
-
- // Check that the value is a JavaScript function, fetching its map into eax.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
- __ j(not_equal, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
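All of these call ICs share the stack layout in the state comments: the return address at esp[0], the arguments above it, and the receiver on top, which is why the receiver load uses (argc + 1) * kPointerSize ("1 ~ return address"). A self-contained sketch of the offset arithmetic, assuming the ia32 4-byte pointer size:

#include <cstdio>

const int kPointerSize = 4;  // ia32

int ArgOffset(int argc, int n) { return (argc - n) * kPointerSize; }
int ReceiverOffset(int argc) { return (argc + 1) * kPointerSize; }

int main() {
  const int argc = 2;
  std::printf("esp[0]  : return address\n");
  for (int n = argc - 1; n >= 0; --n) {
    std::printf("esp[%d]  : arg[%d]\n", ArgOffset(argc, n), n);
  }
  std::printf("esp[%d] : receiver\n", ReceiverOffset(argc));
  return 0;
}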
-
-// The generated code falls through if the call should be handled by runtime.
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
-
- // eax: elements
- // Search the dictionary placing the result in edi.
- GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, edi);
- GenerateFunctionTailCall(masm, argc, &miss);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(counters->call_miss(), 1);
- } else {
- __ IncrementCounter(counters->keyed_call_miss(), 1);
- }
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ push(edx);
- __ push(ecx);
-
- // Call the entry.
- CEntryStub stub(1);
- __ mov(eax, Immediate(2));
- __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
- __ CallStub(&stub);
-
- // Move result to edi and exit the internal frame.
- __ mov(edi, eax);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for a regular CallIC, not for a KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
- __ JumpIfSmi(edx, &invoke, Label::kNear);
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, &global, Label::kNear);
- __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke, Label::kNear);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(edi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC,
- extra_state);
-
- GenerateMiss(masm, argc, extra_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- Label do_call, slow_call, slow_load, slow_reload_receiver;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(ecx, &check_string);
-
- __ bind(&index_smi);
- // Now the key is known to be a smi. Control also jumps here after a
- // numeric string key has been converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
-
- __ bind(&do_call);
- // receiver in edx is not used after this point.
- // ecx: key
- // edi: function
- GenerateFunctionTailCall(masm, argc, &slow_call);
-
- __ bind(&check_number_dictionary);
- // eax: elements
- // ecx: smi key
- // Check whether the elements object is a number dictionary.
- __ CheckMap(eax,
- isolate->factory()->hash_table_map(),
- &slow_load,
- DONT_DO_SMI_CHECK);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- // ebx: untagged index
- // Receiver in edx will be clobbered, need to reload it on miss.
- __ LoadFromNumberDictionary(
- &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&slow_reload_receiver);
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(ecx); // save the key
- __ push(edx); // pass the receiver
- __ push(ecx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(ecx); // restore the key
- // Leave the internal frame.
- }
-
- __ mov(edi, eax);
- __ jmp(&do_call);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow_call);
-
- // The key is known to be an internalized string.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, edx, eax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
- __ CheckMap(ebx,
- isolate->factory()->hash_table_map(),
- &lookup_monomorphic_cache,
- DONT_DO_SMI_CHECK);
-
- GenerateDictionaryLoad(masm, &slow_load, ebx, ecx, eax, edi, edi);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
- CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither a smi nor an internalized string,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
- GenerateMiss(masm, argc);
-
- __ bind(&index_string);
- __ IndexFromHash(ebx, ecx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label slow, notin;
- Factory* factory = masm->isolate()->factory();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- Operand mapped_location =
- GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
- __ mov(edi, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in ebx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
- __ cmp(unmapped_location, factory->the_hole_value());
- __ j(equal, &slow);
- __ mov(edi, unmapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // Check if the name is a string.
- Label miss;
- __ JumpIfSmi(ecx, &miss);
- Condition cond = masm->IsObjectStringType(ecx, eax, eax);
- __ j(NegateCondition(cond), &miss);
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::LOAD_IC, MONOMORPHIC, Code::HANDLER_FRAGMENT);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, edx, ecx, ebx, eax);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
-
- // eax: elements
- // Search the dictionary placing the result in eax.
- GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, eax);
- __ ret(0);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
-
- __ pop(ebx);
- __ push(edx); // receiver
- __ push(ecx); // name
- __ push(ebx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
-
- __ pop(ebx);
- __ push(edx); // receiver
- __ push(ecx); // name
- __ push(ebx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx); // receiver
- __ push(ecx); // name
- __ push(ebx); // return address
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(ebx);
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Label miss, restore_miss;
-
- GenerateStringDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
-
- // A lot of registers are needed for storing to slow case
- // objects. Push and restore receiver but rely on
- // GenerateDictionaryStore preserving the value and name.
- __ push(edx);
- GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
- __ Drop(1);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1);
- __ ret(0);
-
- __ bind(&restore_miss);
- __ pop(edx);
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
- __ push(Immediate(Smi::FromInt(strict_mode)));
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
- __ push(Immediate(Smi::FromInt(strict_mode))); // Strict mode.
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(ebx);
-
- // Do tail-call to runtime routine.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ebx : target map
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- // Must return the modified receiver in eax.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
- __ mov(eax, edx);
- __ Ret();
- __ bind(&fail);
- }
-
- __ pop(ebx);
- __ push(edx);
- __ push(ebx); // return address
- // Leave the code managed by the register allocator and return to the
- // convention of using esi as the context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ebx : target map
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- // Must return the modified receiver in eax.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
- __ mov(eax, edx);
- __ Ret();
- __ bind(&fail);
- }
-
- __ pop(ebx);
- __ push(edx);
- __ push(ebx); // return address
- // Leave the code managed by the register allocator and return to the
- // convention of using esi as the context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al, nothing
- // was inlined.
- if (*test_instruction_address != Assembler::kTestAlByte) {
- ASSERT(*test_instruction_address == Assembler::kNopByte);
- return;
- }
-
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction and the
- // condition code used at the patched jump.
- int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- address, test_instruction_address, delta);
- }
-
- // Patch with a short conditional jump. Enabling means switching from a short
- // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is
- // the reverse.
- Address jmp_address = test_instruction_address - delta;
- ASSERT((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
- Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
- *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
-
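PatchInlinedSmiCode only rewrites a single opcode byte: a short conditional jump is encoded as (0x70 | condition) followed by an 8-bit displacement, so flipping between the carry and zero conditions leaves the displacement intact. A standalone sketch of the byte math, assuming the documented ia32 short-jcc encodings (0x72 jc, 0x73 jnc, 0x74 jz, 0x75 jnz):

#include <cassert>
#include <cstdint>

const uint8_t kJccShortPrefix = 0x70;
const uint8_t kCarry = 0x2, kNotCarry = 0x3, kZero = 0x4, kNotZero = 0x5;

void PatchShortJcc(uint8_t* jmp_address, uint8_t new_condition) {
  // Only the opcode byte changes; the rel8 displacement stays put.
  *jmp_address = static_cast<uint8_t>(kJccShortPrefix | new_condition);
}

int main() {
  uint8_t code[2] = {
      static_cast<uint8_t>(kJccShortPrefix | kNotCarry), 0x10 };  // jnc +0x10
  PatchShortJcc(code, kNotZero);   // enable:  jnc -> jnz
  assert(code[0] == 0x75 && code[1] == 0x10);
  PatchShortJcc(code, kNotCarry);  // disable: jnz -> jnc
  assert(code[0] == 0x73);
  return 0;
}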
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
deleted file mode 100644
index 8ef3bdf..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
+++ /dev/null
@@ -1,6266 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "ia32/lithium-codegen-ia32.h"
-#include "ic.h"
-#include "code-stubs.h"
-#include "deoptimizer.h"
-#include "stub-cache.h"
-#include "codegen.h"
-
-namespace v8 {
-namespace internal {
-
-
-// When invoking builtins, we need to record the safepoint in the middle of
-// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) {}
- virtual ~SafepointGenerator() { }
-
- virtual void BeforeCall(int call_size) const {}
-
- virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
- ASSERT(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- support_aligned_spilled_doubles_ = info()->IsOptimizing();
-
- dynamic_frame_alignment_ = info()->IsOptimizing() &&
- ((chunk()->num_double_slots() > 2 &&
- !chunk()->graph()->is_recursive()) ||
- !info()->osr_ast_id().IsNone());
-
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateJumpTable() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
- code->set_stack_slots(GetStackSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
- PopulateDeoptimizationData(code);
- if (!info()->IsStub()) {
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
- }
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
-}
-
-
-void LCodeGen::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack-allocated buffer goes out of scope.
- size_t length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
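Comment() formats into a stack buffer and then copies the text to longer-lived storage, since the assembler keeps the pointer after the call returns. A minimal sketch of the same pattern in portable C++, with vsnprintf standing in for StringBuilder:

#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>

char* FormatComment(const char* format, ...) {
  char buffer[4096];  // stack buffer, dies with this frame
  va_list arguments;
  va_start(arguments, format);
  std::vsnprintf(buffer, sizeof(buffer), format, arguments);
  va_end(arguments);
  // Copy to heap storage that outlives the call, as Comment() does.
  size_t length = std::strlen(buffer);
  char* copy = static_cast<char*>(std::malloc(length + 1));
  std::memcpy(copy, buffer, length + 1);
  return copy;
}

int main() {
  char* comment = FormatComment(";;; @%d: %s.", 7, "add-d");
  std::puts(comment);
  std::free(comment);
  return 0;
}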
-
-bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). ecx is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ test(ecx, Operand(ecx));
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ mov(Operand(esp, receiver_offset),
- Immediate(isolate()->factory()->undefined_value()));
- __ bind(&ok);
- }
-
- if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
- // Move state of dynamic frame alignment into edx.
- __ mov(edx, Immediate(kNoAlignmentPadding));
-
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp + 4 to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(not_zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ bind(&do_not_pad);
- }
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- ASSERT(!frame_is_built_);
- frame_is_built_ = true;
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- if (info()->IsStub()) {
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- } else {
- __ push(edi); // Callee's JS function.
- }
- }
-
- if (info()->IsOptimizing() &&
- dynamic_frame_alignment_ &&
- FLAG_debug_code) {
- __ test(esp, Immediate(kPointerSize));
- __ Assert(zero, "frame is expected to be aligned");
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- ASSERT(slots != 0 || !info()->IsOptimizing());
- if (slots > 0) {
- if (slots == 1) {
- if (dynamic_frame_alignment_) {
- __ push(edx);
- } else {
- __ push(Immediate(kNoAlignmentPadding));
- }
- } else {
- if (FLAG_debug_code) {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
- __ push(eax);
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ mov(MemOperand(esp, eax, times_4, 0),
- Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
- __ pop(eax);
- } else {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- // On Windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write to each page in turn (the value is irrelevant).
- const int kPageSize = 4 * KB;
- for (int offset = slots * kPointerSize - kPageSize;
- offset > 0;
- offset -= kPageSize) {
- __ mov(Operand(esp, offset), eax);
- }
-#endif
- }
-
- if (support_aligned_spilled_doubles_) {
- Comment(";;; Store dynamic frame alignment tag for spilled doubles");
- // Store dynamic frame alignment state in the first local.
- int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
- if (dynamic_frame_alignment_) {
- __ mov(Operand(ebp, offset), edx);
- } else {
- __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
- }
- }
- }
-
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- Comment(";;; Save clobbered callee double registers");
- CpuFeatures::Scope scope(SSE2);
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ movdbl(MemOperand(esp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
- save_iterator.Advance();
- count++;
- }
- }
- }
-
- // Possibly allocate a local context.
- int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope() != NULL && scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment(";;; Allocate local context");
- // Argument to NewContext is the function, which is still in edi.
- __ push(edi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both eax and esi. It replaces the context
- // passed to us. It's saved in the stack and kept live in esi.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
-
- // Copy parameters into context if necessary.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ mov(eax, Operand(ebp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(var->index());
- __ mov(Operand(esi, context_offset), eax);
- // Update the write barrier. This clobbers eax and ebx.
- __ RecordWriteContextSlot(esi,
- context_offset,
- eax,
- ebx,
- kDontSaveFPRegs);
- }
- }
- Comment(";;; End allocate local context");
- }
-
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- // We have not executed any compiled code yet, so esi still holds the
- // incoming context.
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
-}
-
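One prologue detail worth a second look is the _MSC_VER block: Windows commits stack pages lazily behind a guard page, so a frame larger than a page must be grown with one write per 4 KB. A toy illustration of the offset loop, printing instead of storing:

#include <cstdio>

int main() {
  const int kPointerSize = 4;      // ia32
  const int kPageSize = 4 * 1024;
  const int slots = 5000;          // large frame: slots * 4 = 20000 bytes
  for (int offset = slots * kPointerSize - kPageSize;
       offset > 0;
       offset -= kPageSize) {
    // The generated code does: mov [esp + offset], eax
    std::printf("touch esp+%d\n", offset);
  }
  return 0;
}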
-
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
-
- if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. <#%d>", current_instruction_,
- instr->Mnemonic(), hydrogen->id());
- }
- } else {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- }
- }
- instr->CompileToNative(this);
- }
- }
- EnsureSpaceForLazyDeopt();
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateJumpTable() {
- Label needs_frame_not_call;
- Label needs_frame_is_call;
- for (int i = 0; i < jump_table_.length(); i++) {
- __ bind(&jump_table_[i].label);
- Address entry = jump_table_[i].address;
- bool is_lazy_deopt = jump_table_[i].is_lazy_deopt;
- Deoptimizer::BailoutType type =
- is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- int id = Deoptimizer::GetDeoptimizationId(entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (jump_table_[i].needs_frame) {
- __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
- if (is_lazy_deopt) {
- if (needs_frame_is_call.is_bound()) {
- __ jmp(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- // Push a PC inside the function so that the deopt code can find where
- // the deopt comes from. It doesn't have to be the precise return
- // address of a "calling" LAZY deopt, it only has to be somewhere
- // inside the code body.
- Label push_approx_pc;
- __ call(&push_approx_pc);
- __ bind(&push_approx_pc);
- // Push the continuation, which was stashed where the ebp should
- // be. Replace it with the saved ebp.
- __ push(MemOperand(esp, 3 * kPointerSize));
- __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
- __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
- __ ret(0); // Call the continuation without clobbering registers.
- }
- } else {
- if (needs_frame_not_call.is_bound()) {
- __ jmp(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- // Push the continuation, which was stashed where the ebp should
- // be. Replace it with the saved ebp.
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
- __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
- __ ret(0); // Call the continuation without clobbering registers.
- }
- }
- } else {
- if (is_lazy_deopt) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
- }
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred build frame",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(!frame_is_built_);
- ASSERT(info()->IsStub());
- frame_is_built_ = true;
- // Build the frame in such a way that esi isn't trashed.
- __ push(ebp); // Caller's frame pointer.
- __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- __ lea(ebp, Operand(esp, 2 * kPointerSize));
- }
- Comment(";;; Deferred code @%d: %s.",
- code->instruction_index(),
- code->instr()->Mnemonic());
- code->Generate();
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred destroy frame",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(frame_is_built_);
- frame_is_built_ = false;
- __ mov(esp, ebp);
- __ pop(ebp);
- }
- __ jmp(code->exit());
- }
- }
-
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
- if (!info()->IsStub()) {
- // For lazy deoptimization we need space to patch a call after every call.
- // Ensure there is always space for such patching, even if the code ends
- // in a call.
- int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
- while (masm()->pc_offset() < target_offset) {
- masm()->nop();
- }
- }
- safepoints_.Emit(masm(), GetStackSlotCount());
- return !is_aborted();
-}
-
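The nop padding in GenerateSafepointTable guarantees room to patch a call over the code tail for lazy deoptimization. A small sketch of the padding loop over a byte buffer (0x90 is the x86 nop; patch_size is a stand-in for Deoptimizer::patch_size()):

#include <cassert>
#include <cstdint>
#include <vector>

void EnsurePatchSpace(std::vector<uint8_t>* code, int patch_size) {
  size_t target_offset = code->size() + patch_size;
  while (code->size() < target_offset) {
    code->push_back(0x90);  // x86 nop
  }
}

int main() {
  std::vector<uint8_t> code = {0xC3};  // ret: code ends in an instruction
  EnsurePatchSpace(&code, 5);          // room for a 5-byte call rel32
  assert(code.size() == 6);
  return 0;
}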
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(int index) const {
- return XMMRegister::FromAllocationIndex(index);
-}
-
-
-bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
- return op->IsDoubleRegister();
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- ASSERT(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return constant->handle();
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
- return chunk_->LookupLiteralRepresentation(op).IsInteger32();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) const {
- if (op->IsRegister()) return Operand(ToRegister(op));
- if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return Operand(ebp, StackSlotOffset(op->index()));
-}
-
-
-Operand LCodeGen::HighOperand(LOperand* op) {
- ASSERT(op->IsDoubleStackSlot());
- return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* pushed_arguments_index,
- int* pushed_arguments_count) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
-
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *pushed_arguments_index = -environment->parameter_count();
- *pushed_arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- pushed_arguments_index,
- pushed_arguments_count);
- bool has_closure_id = !info()->closure().is_null() &&
- *info()->closure() != *environment->closure();
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- ASSERT(translation_size == 1);
- ASSERT(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- ASSERT(translation_size == 2);
- ASSERT(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- default:
- UNREACHABLE();
- }
-
- // Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization;
- // otherwise the actual argument values are unknown for inlined frames.
- bool arguments_known = true;
- int arguments_index = *pushed_arguments_index;
- int arguments_count = *pushed_arguments_count;
- if (environment->entry() != NULL) {
- arguments_known = environment->entry()->arguments_pushed();
- arguments_index = arguments_index < 0
- ? GetStackSlotCount() : arguments_index + arguments_count;
- arguments_count = environment->entry()->arguments_count() + 1;
- if (environment->entry()->arguments_pushed()) {
- *pushed_arguments_index = arguments_index;
- *pushed_arguments_count = arguments_count;
- }
- }
-
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- arguments_known,
- arguments_index,
- arguments_count);
- }
- }
-
- AddToTranslation(translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- }
-}
-
-
-void LCodeGen::AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(
- arguments_known, arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
- if (is_tagged) {
- translation->StoreStackSlot(op->index());
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
- } else {
- translation->StoreInt32StackSlot(op->index());
- }
- } else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- XMMRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode) {
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
- __ nop();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* fun,
- int argc,
- LInstruction* instr) {
- ASSERT(instr != NULL);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- __ CallRuntime(fun, argc);
-
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-
- ASSERT(info()->is_calling());
-}
-
-
-void LCodeGen::LoadContextFromDeferred(LOperand* context) {
- if (context->IsRegister()) {
- if (!ToRegister(context).is(esi)) {
- __ mov(esi, ToRegister(context));
- }
- } else if (context->IsStackSlot()) {
- __ mov(esi, ToOperand(context));
- } else if (context->IsConstantOperand()) {
- HConstant* constant =
- chunk_->LookupConstant(LConstantOperand::cast(context));
- __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
- } else {
- UNREACHABLE();
- }
-}
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context) {
- LoadContextFromDeferred(context);
-
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-
- ASSERT(info()->is_calling());
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(
- LEnvironment* environment, Safepoint::DeoptMode mode) {
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, zone());
- }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- ASSERT(info()->IsOptimizing() || info()->IsStub());
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort("bailout was not prepared");
- return;
- }
-
- if (FLAG_deopt_every_n_times != 0) {
- Handle<SharedFunctionInfo> shared(info_->shared_info());
- Label no_deopt;
- __ pushfd();
- __ push(eax);
- __ push(ebx);
- __ mov(ebx, shared);
- __ mov(eax,
- FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset));
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
- __ j(not_zero, &no_deopt, Label::kNear);
- if (FLAG_trap_on_deopt) __ int3();
- __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
- eax);
- __ pop(ebx);
- __ pop(eax);
- __ popfd();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
-
- __ bind(&no_deopt);
- __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
- eax);
- __ pop(ebx);
- __ pop(eax);
- __ popfd();
- }
-
- if (FLAG_trap_on_deopt) {
- Label done;
- if (cc != no_condition) {
- __ j(NegateCondition(cc), &done, Label::kNear);
- }
- __ int3();
- __ bind(&done);
- }
-
- ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
- if (cc == no_condition && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
- } else {
- // We often have several deopts to the same entry, reuse the last
- // jump entry if this is the case.
- if (jump_table_.is_empty() ||
- jump_table_.last().address != entry ||
- jump_table_.last().needs_frame != !frame_is_built_ ||
- jump_table_.last().is_lazy_deopt != needs_lazy_deopt) {
- JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
- jump_table_.Add(table_entry, zone());
- }
- if (cc == no_condition) {
- __ jmp(&jump_table_.last().label);
- } else {
- __ j(cc, &jump_table_.last().label);
- }
- }
-}
-
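DeoptimizeIf reuses the last jump-table slot when consecutive deopts share an entry address, frame requirement, and laziness. A minimal sketch of that dedup rule, with a simplified JumpTableEntry:

#include <cassert>
#include <cstdint>
#include <vector>

struct JumpTableEntry {
  uintptr_t address;
  bool needs_frame;
  bool is_lazy_deopt;
};

void AddDeopt(std::vector<JumpTableEntry>* table, JumpTableEntry entry) {
  if (table->empty() ||
      table->back().address != entry.address ||
      table->back().needs_frame != entry.needs_frame ||
      table->back().is_lazy_deopt != entry.is_lazy_deopt) {
    table->push_back(entry);
  }
  // Either way, the emitted jump targets the label of table->back().
}

int main() {
  std::vector<JumpTableEntry> table;
  AddDeopt(&table, {0x1000, true, false});
  AddDeopt(&table, {0x1000, true, false});  // reused, no new slot
  AddDeopt(&table, {0x2000, true, false});
  assert(table.size() == 2);
  return 0;
}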
-
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
- maps.Add(map, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the dependent code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
-
- Handle<ByteArray> translations = translations_.CreateByteArray();
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
- }
-
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- ASSERT(kind == expected_safepoint_kind_);
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
- Safepoint safepoint =
- safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
- RecordSafepoint(&empty_pointers, mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
-}
-
-
-void LCodeGen::RecordPosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
-
- if (divisor < 0) divisor = -divisor;
-
- Label positive_dividend, done;
- __ test(dividend, Operand(dividend));
- __ j(not_sign, &positive_dividend, Label::kNear);
- __ neg(dividend);
- __ and_(dividend, divisor - 1);
- __ neg(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ j(not_zero, &done, Label::kNear);
- DeoptimizeIf(no_condition, instr->environment());
- } else {
- __ jmp(&done, Label::kNear);
- }
- __ bind(&positive_dividend);
- __ and_(dividend, divisor - 1);
- __ bind(&done);
- } else {
- Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
- Register left_reg = ToRegister(instr->left());
- Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
-
- ASSERT(left_reg.is(eax));
- ASSERT(result_reg.is(edx));
- ASSERT(!right_reg.is(eax));
- ASSERT(!right_reg.is(edx));
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr->environment());
- }
-
- __ test(left_reg, Operand(left_reg));
- __ j(zero, &remainder_eq_dividend, Label::kNear);
- __ j(sign, &slow, Label::kNear);
-
- __ test(right_reg, Operand(right_reg));
- __ j(not_sign, &both_positive, Label::kNear);
- // The sign of the divisor doesn't matter.
- __ neg(right_reg);
-
- __ bind(&both_positive);
- // If the dividend is smaller than the nonnegative
- // divisor, the dividend is the result.
- __ cmp(left_reg, Operand(right_reg));
- __ j(less, &remainder_eq_dividend, Label::kNear);
-
- // Check if the divisor is a power-of-two integer.
- Register scratch = ToRegister(instr->temp());
- __ mov(scratch, right_reg);
- __ sub(Operand(scratch), Immediate(1));
- __ test(scratch, Operand(right_reg));
- __ j(not_zero, &do_subtraction, Label::kNear);
- __ and_(left_reg, Operand(scratch));
- __ jmp(&remainder_eq_dividend, Label::kNear);
-
- __ bind(&do_subtraction);
- const int kUnfolds = 3;
- // Try a few subtractions of the dividend.
- __ mov(scratch, left_reg);
- for (int i = 0; i < kUnfolds; i++) {
- // Reduce the dividend by the divisor.
- __ sub(left_reg, Operand(right_reg));
- // Check if the dividend is less than the divisor.
- __ cmp(left_reg, Operand(right_reg));
- __ j(less, &remainder_eq_dividend, Label::kNear);
- }
- __ mov(left_reg, scratch);
-
- // Slow case, using idiv instruction.
- __ bind(&slow);
-
- // Check for (kMinInt % -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left_reg, kMinInt);
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmp(right_reg, -1);
- DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // Sign extend to edx.
- __ cdq();
-
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- Label done;
- __ test(left_reg, Operand(left_reg));
- __ j(not_sign, &positive_left, Label::kNear);
- __ idiv(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
- __ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done, Label::kNear);
-
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&positive_left);
- __ idiv(right_reg);
- __ bind(&done);
- } else {
- __ idiv(right_reg);
- }
- __ jmp(&done, Label::kNear);
-
- __ bind(&remainder_eq_dividend);
- __ mov(result_reg, left_reg);
-
- __ bind(&done);
- }
-}
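-
-// A note on the fast paths above: for a power-of-two divisor d, truncated
-// modulus needs no idiv. For x >= 0 the result is x & (d - 1); for x < 0 it
-// is -((-x) & (d - 1)). E.g. with d = 4 and x = -7: neg gives 7, and_ with 3
-// gives 3, neg gives -3, and indeed -7 % 4 == -3 in JavaScript. A zero
-// result on the negative path would have to be -0, hence the
-// kBailoutOnMinusZero deopt. The general path first tries up to kUnfolds
-// subtractions, which is cheap when the quotient is small, before falling
-// back to idiv.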
-
-
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, Operand(dividend));
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = -divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
-
- if (test_value != 0) {
- // Deoptimize if remainder is not 0.
- __ test(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sar(dividend, power);
- }
-
- if (divisor < 0) __ neg(dividend);
-
- return;
- }
-
- LOperand* right = instr->right();
- ASSERT(ToRegister(instr->result()).is(eax));
- ASSERT(ToRegister(instr->left()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(eax));
- ASSERT(!ToRegister(instr->right()).is(edx));
-
- Register left_reg = eax;
-
- // Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ test(right_reg, ToOperand(right));
- DeoptimizeIf(zero, instr->environment());
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ test(left_reg, Operand(left_reg));
- __ j(not_zero, &left_not_zero, Label::kNear);
- __ test(right_reg, ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmp(left_reg, kMinInt);
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmp(right_reg, -1);
- DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // Sign extend to edx.
- __ cdq();
- __ idiv(right_reg);
-
- if (!instr->is_flooring()) {
- // Deoptimize if remainder is not 0.
- __ test(edx, Operand(edx));
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- Label done;
- __ test(edx, edx);
- __ j(zero, &done, Label::kNear);
- __ xor_(edx, right_reg);
- __ sar(edx, 31);
- __ add(eax, edx);
- __ bind(&done);
- }
-}
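-
-// Illustration of the flooring fix-up above. After idiv, eax holds the
-// truncated quotient and edx the remainder. floor(a/b) differs from
-// trunc(a/b) by exactly one when the remainder is nonzero and a and b have
-// opposite signs; in that case edx ^ right_reg has its sign bit set, so
-// sar edx, 31 yields -1 and the final add corrects eax. E.g. a = -7, b = 2:
-// idiv gives eax = -3, edx = -1; (-1 ^ 2) >> 31 == -1; eax becomes -4,
-// which is floor(-3.5).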
-
-
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->right()->IsConstantOperand());
-
- Register dividend = ToRegister(instr->left());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- Register result = ToRegister(instr->result());
-
- switch (divisor) {
- case 0:
- DeoptimizeIf(no_condition, instr->environment());
- return;
-
- case 1:
- __ Move(result, dividend);
- return;
-
- case -1:
- __ Move(result, dividend);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
- return;
- }
-
- uint32_t divisor_abs = abs(divisor);
- if (IsPowerOf2(divisor_abs)) {
- int32_t power = WhichPowerOf2(divisor_abs);
- if (divisor < 0) {
- // Input[dividend] is clobbered.
- // The sequence is tedious because neg(dividend) might overflow.
- __ mov(result, dividend);
- __ sar(dividend, 31);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ shl(dividend, 32 - power);
- __ sar(result, power);
- __ not_(dividend);
- // Clear result.sign if dividend.sign is set.
- __ and_(result, dividend);
- } else {
- __ Move(result, dividend);
- __ sar(result, power);
- }
- } else {
- ASSERT(ToRegister(instr->left()).is(eax));
- ASSERT(ToRegister(instr->result()).is(edx));
- Register scratch = ToRegister(instr->temp());
-
- // Find b such that 2^b < divisor_abs < 2^(b+1).
- unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
- unsigned shift = 32 + b; // Effectively one extra bit of precision.
- double multiplier_f =
- static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
- int64_t multiplier;
- if (multiplier_f - floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(floor(multiplier_f));
- } else {
- multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
- }
- // The multiplier is a uint32.
- ASSERT(multiplier > 0 &&
- multiplier < (static_cast<int64_t>(1) << 32));
- __ mov(scratch, dividend);
- if (divisor < 0 &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- __ mov(edx, static_cast<int32_t>(multiplier));
- __ imul(edx);
- if (static_cast<int32_t>(multiplier) < 0) {
- __ add(edx, scratch);
- }
- Register reg_lo = eax;
- Register reg_byte_scratch = scratch;
- if (!reg_byte_scratch.is_byte_register()) {
- __ xchg(reg_lo, reg_byte_scratch);
- reg_lo = scratch;
- reg_byte_scratch = eax;
- }
- if (divisor < 0) {
- __ xor_(reg_byte_scratch, reg_byte_scratch);
- __ cmp(reg_lo, 0x40000000);
- __ setcc(above, reg_byte_scratch);
- __ neg(edx);
- __ sub(edx, reg_byte_scratch);
- } else {
- __ xor_(reg_byte_scratch, reg_byte_scratch);
- __ cmp(reg_lo, 0xC0000000);
- __ setcc(above_equal, reg_byte_scratch);
- __ add(edx, reg_byte_scratch);
- }
- __ sar(edx, shift - 32);
- }
-}
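-
-// Sketch of the multiply-by-reciprocal path above: with
-// 2^b < |divisor| < 2^(b+1) and shift = 32 + b, the multiplier is
-// round(2^shift / |divisor|) and fits in 32 bits. imul leaves the high half
-// of dividend * multiplier in edx; since imul is a signed multiply, the
-// saved dividend is added back when the multiplier has its top bit set, to
-// recover the unsigned product. The setcc sequences then use the low half
-// of the product to round the quotient toward -infinity before the final
-// sar edx, shift - 32. E.g. divisor = 3 gives b = 1, shift = 33,
-// multiplier = 0xAAAAAAAB, and floor(10 * 0xAAAAAAAB / 2^33) == 3 == 10 / 3.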
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->left());
- LOperand* right = instr->right();
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ mov(ToRegister(instr->temp()), left);
- }
-
- if (right->IsConstantOperand()) {
- // Try strength reductions on the multiplication.
- // All replacement instructions are at most as long as the imul
- // and have better latency.
- int constant = ToInteger32(LConstantOperand::cast(right));
- if (constant == -1) {
- __ neg(left);
- } else if (constant == 0) {
- __ xor_(left, Operand(left));
- } else if (constant == 2) {
- __ add(left, Operand(left));
- } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- // If we know that the multiplication can't overflow, it's safe to
- // use instructions that don't set the overflow flag for the
- // multiplication.
- switch (constant) {
- case 1:
- // Do nothing.
- break;
- case 3:
- __ lea(left, Operand(left, left, times_2, 0));
- break;
- case 4:
- __ shl(left, 2);
- break;
- case 5:
- __ lea(left, Operand(left, left, times_4, 0));
- break;
- case 8:
- __ shl(left, 3);
- break;
- case 9:
- __ lea(left, Operand(left, left, times_8, 0));
- break;
- case 16:
- __ shl(left, 4);
- break;
- default:
- __ imul(left, left, constant);
- break;
- }
- } else {
- __ imul(left, left, constant);
- }
- } else {
- __ imul(left, ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ test(left, Operand(left));
- __ j(not_zero, &done, Label::kNear);
- if (right->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
- } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
- __ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr->environment());
- }
- } else {
- // Test the non-zero operand for negative sign.
- __ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
- }
- __ bind(&done);
- }
-}
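-
-// Note on the strength reductions above: lea computes
-// base + index * {2,4,8} + displacement in a single instruction without
-// touching EFLAGS, so lea left, [left + left*2] is left * 3 and
-// lea left, [left + left*4] is left * 5. These replacements are only used
-// when kCanOverflow is clear, because unlike imul they do not set the
-// overflow flag needed by the deoptimization check.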
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
-
- if (right->IsConstantOperand()) {
- int right_operand = ToInteger32(LConstantOperand::cast(right));
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(ToRegister(left), right_operand);
- break;
- case Token::BIT_OR:
- __ or_(ToRegister(left), right_operand);
- break;
- case Token::BIT_XOR:
- __ xor_(ToRegister(left), right_operand);
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- switch (instr->op()) {
- case Token::BIT_AND:
- __ and_(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_OR:
- __ or_(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToOperand(right));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- if (right->IsRegister()) {
- ASSERT(ToRegister(right).is(ecx));
-
- switch (instr->op()) {
- case Token::ROR:
- __ ror_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- break;
- case Token::SAR:
- __ sar_cl(ToRegister(left));
- break;
- case Token::SHR:
- __ shr_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- break;
- case Token::SHL:
- __ shl_cl(ToRegister(left));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- int value = ToInteger32(LConstantOperand::cast(right));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- __ ror(ToRegister(left), shift_count);
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ sar(ToRegister(left), shift_count);
- }
- break;
- case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- __ shr(ToRegister(left), shift_count);
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- __ shl(ToRegister(left), shift_count);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
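-
-// Why SHR and ROR may deoptimize: the untagged value is a signed 32-bit
-// integer, but JavaScript's >>> yields an unsigned result. When the shift
-// amount is zero, a set top bit would stand for a value >= 2^31 with no
-// int32 representation (e.g. 0x80000000 >>> 0 is 2147483648), so the test
-// against 0x80000000 deoptimizes exactly in that case.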
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ sub(ToOperand(left), ToInteger32Immediate(right));
- } else {
- __ sub(ToRegister(left), ToOperand(right));
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
- __ Set(ToRegister(instr->result()), Immediate(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- XMMRegister res = ToDoubleRegister(instr->result());
- double v = instr->value();
- // Use xor to produce +0.0 in a fast and compact way, but avoid doing
- // so if the constant is -0.0.
- if (BitCast<uint64_t, double>(v) == 0) {
- __ xorps(res, res);
- } else {
- Register temp = ToRegister(instr->temp());
- uint64_t int_val = BitCast<uint64_t, double>(v);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope1(SSE2);
- CpuFeatures::Scope scope2(SSE4_1);
- if (lower != 0) {
- __ Set(temp, Immediate(lower));
- __ movd(res, Operand(temp));
- __ Set(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
- } else {
- __ xorps(res, res);
- __ Set(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
- }
- } else {
- CpuFeatures::Scope scope(SSE2);
- __ Set(temp, Immediate(upper));
- __ movd(res, Operand(temp));
- __ psllq(res, 32);
- if (lower != 0) {
- __ Set(temp, Immediate(lower));
- __ movd(xmm0, Operand(temp));
- __ por(res, xmm0);
- }
- }
- }
-}
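-
-// What the sequence above assembles: the 64-bit IEEE bit pattern of the
-// constant is split into two 32-bit halves. With SSE4.1, movd loads the low
-// half and pinsrd inserts the high half into lane 1; with plain SSE2 the
-// high half is loaded, shifted left 32 bits with psllq, and the low half is
-// OR-ed in through xmm0. E.g. 1.0 is 0x3FF0000000000000, so upper is
-// 0x3FF00000 and lower is 0, which takes the xorps + pinsrd shortcut.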
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Register reg = ToRegister(instr->result());
- Handle<Object> handle = instr->value();
- if (handle->IsHeapObject()) {
- __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
- } else {
- __ Set(reg, Immediate(handle));
- }
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoFixedArrayBaseLength(
- LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ mov(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(result, FieldOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(result, Map::kElementsKindMask);
- __ shr(result, Map::kElementsKindShift);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temp());
- ASSERT(input.is(result));
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
-
- // If the object is not a value type, return the object.
- __ CmpObjectType(input, JS_VALUE_TYPE, map);
- __ j(not_equal, &done, Label::kNear);
- __ mov(result, FieldOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- Label runtime, done;
- ASSERT(object.is(result));
- ASSERT(object.is(eax));
-
- __ test(object, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- DeoptimizeIf(not_equal, instr->environment());
-
- if (index->value() == 0) {
- __ mov(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand::StaticVariable(stamp));
- __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ mov(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(Operand(esp, 0), object);
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
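-
-// The fast path above is valid because JSDate caches its broken-down time
-// fields: the cache is current exactly when the date's cache stamp matches
-// the isolate-wide date_cache_stamp, so a mismatch falls through to the
-// runtime helper that recomputes the requested field.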
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
-}
-
-
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->Equals(instr->result()));
- __ not_(ToRegister(input));
-}
-
-
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToOperand(instr->value()));
- ASSERT(ToRegister(instr->context()).is(esi));
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- Comment("Unreachable code.");
- __ int3();
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ add(ToOperand(left), ToInteger32Immediate(right));
- } else {
- __ add(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- CpuFeatures::Scope scope(SSE2);
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsInteger32()) {
- Label return_left;
- Condition condition = (operation == HMathMinMax::kMathMin)
- ? less_equal
- : greater_equal;
- if (right->IsConstantOperand()) {
- Operand left_op = ToOperand(left);
- Immediate right_imm = ToInteger32Immediate(right);
- __ cmp(left_op, right_imm);
- __ j(condition, &return_left, Label::kNear);
- __ mov(left_op, right_imm);
- } else {
- Register left_reg = ToRegister(left);
- Operand right_op = ToOperand(right);
- __ cmp(left_reg, right_op);
- __ j(condition, &return_left, Label::kNear);
- __ mov(left_reg, right_op);
- }
- __ bind(&return_left);
- } else {
- ASSERT(instr->hydrogen()->representation().IsDouble());
- Label check_nan_left, check_zero, return_left, return_right;
- Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
- XMMRegister left_reg = ToDoubleRegister(left);
- XMMRegister right_reg = ToDoubleRegister(right);
- __ ucomisd(left_reg, right_reg);
- __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
-
- __ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(left_reg, xmm_scratch);
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
- __ orpd(left_reg, right_reg);
- } else {
- // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
- __ addsd(left_reg, right_reg);
- }
- __ jmp(&return_left, Label::kNear);
-
- __ bind(&check_nan_left);
- __ ucomisd(left_reg, left_reg); // NaN check.
- __ j(parity_even, &return_left, Label::kNear); // left == NaN.
- __ bind(&return_right);
- __ movsd(left_reg, right_reg);
-
- __ bind(&return_left);
- }
-}
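-
-// The zero handling above is an IEEE sign-bit trick: when the operands
-// compare equal but may be zeros, min must return -0 if either operand is
-// -0, which orpd achieves by OR-ing the sign bits, while max must return +0
-// if either operand is +0, which addsd achieves since (+0) + (-0) == +0.
-// The NaN handling is symmetric: if left is NaN it is returned directly;
-// otherwise right must be the NaN and is returned, so the result is NaN
-// whenever either input is.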
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister left = ToDoubleRegister(instr->left());
- XMMRegister right = ToDoubleRegister(instr->right());
- XMMRegister result = ToDoubleRegister(instr->result());
- // Modulo uses a fixed result register.
- ASSERT(instr->op() == Token::MOD || left.is(result));
- switch (instr->op()) {
- case Token::ADD:
- __ addsd(left, right);
- break;
- case Token::SUB:
- __ subsd(left, right);
- break;
- case Token::MUL:
- __ mulsd(left, right);
- break;
- case Token::DIV:
- __ divsd(left, right);
- break;
- case Token::MOD: {
- // Pass two doubles as arguments on the stack.
- __ PrepareCallCFunction(4, eax);
- __ movdbl(Operand(esp, 0 * kDoubleSize), left);
- __ movdbl(Operand(esp, 1 * kDoubleSize), right);
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()),
- 4);
-
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movdbl(result, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
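-
-// Token::MOD is the only case that leaves SSE: the ia32 C calling
-// convention passes the two doubles on the stack and returns the result in
-// x87 st(0), so the value is spilled to memory with fstp_d and reloaded
-// into the fixed XMM result register with movdbl. The runtime call computes
-// the JavaScript remainder, which agrees with C's fmod, e.g.
-// -5.5 % 2.0 == -1.5.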
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->left()).is(edx));
- ASSERT(ToRegister(instr->right()).is(eax));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ nop(); // Signals no inlined code.
-}
-
-
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
-
-
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
-
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- __ jmp(chunk_->GetAssemblyLabel(right_block));
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- CpuFeatures::Scope scope(SSE2);
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
- Register reg = ToRegister(instr->value());
- __ test(reg, Operand(reg));
- EmitBranch(true_block, false_block, not_zero);
- } else if (r.IsDouble()) {
- XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
- EmitBranch(true_block, false_block, not_equal);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- __ cmp(reg, factory()->true_value());
- EmitBranch(true_block, false_block, equal);
- } else if (type.IsSmi()) {
- __ test(reg, Operand(reg));
- EmitBranch(true_block, false_block, not_equal);
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
-
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
- // undefined -> false.
- __ cmp(reg, factory()->undefined_value());
- __ j(equal, false_label);
- }
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
- // true -> true.
- __ cmp(reg, factory()->true_value());
- __ j(equal, true_label);
- // false -> false.
- __ cmp(reg, factory()->false_value());
- __ j(equal, false_label);
- }
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
- // 'null' -> false.
- __ cmp(reg, factory()->null_value());
- __ j(equal, false_label);
- }
-
- if (expected.Contains(ToBooleanStub::SMI)) {
- // Smis: 0 -> false, all other -> true.
- __ test(reg, Operand(reg));
- __ j(equal, false_label);
- __ JumpIfSmi(reg, true_label);
- } else if (expected.NeedsMap()) {
- // If we need a map later and have a Smi -> deopt.
- __ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
- }
-
- Register map = no_reg; // Keep the compiler happy.
- if (expected.NeedsMap()) {
- map = ToRegister(instr->temp());
- ASSERT(!map.is(reg));
- __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
-
- if (expected.CanBeUndetectable()) {
- // Undetectable -> false.
- __ test_b(FieldOperand(map, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(not_zero, false_label);
- }
- }
-
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
- // spec object -> true.
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(above_equal, true_label);
- }
-
- if (expected.Contains(ToBooleanStub::STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- __ j(not_zero, true_label);
- __ jmp(false_label);
- __ bind(&not_string);
- }
-
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
- __ j(zero, false_label);
- __ jmp(true_label);
- __ bind(&not_heap_number);
- }
-
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(no_condition, instr->environment());
- }
- }
-}
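-
-// The tagged case above is an open-coded ECMAScript ToBoolean: undefined,
-// null, false, the smi 0, the empty string, and heap numbers equal to +0,
-// -0, or NaN are falsy; everything else is truthy. The expected type set
-// recorded by the ToBooleanStub prunes these checks down to the types this
-// branch has actually seen, and any type observed for the first time
-// deoptimizes.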
-
-
-void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = no_condition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = equal;
- break;
- case Token::LT:
- cond = is_unsigned ? below : less;
- break;
- case Token::GT:
- cond = is_unsigned ? above : greater;
- break;
- case Token::LTE:
- cond = is_unsigned ? below_equal : less_equal;
- break;
- case Token::GTE:
- cond = is_unsigned ? above_equal : greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- CpuFeatures::Scope scope(SSE2);
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
- } else {
- if (right->IsConstantOperand()) {
- __ cmp(ToRegister(left), ToInteger32Immediate(right));
- } else if (left->IsConstantOperand()) {
- __ cmp(ToOperand(right), ToInteger32Immediate(left));
- // We transposed the operands. Reverse the condition.
- cc = ReverseCondition(cc);
- } else {
- __ cmp(ToRegister(left), ToOperand(right));
- }
- }
- EmitBranch(true_block, false_block, cc);
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Operand right = ToOperand(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmp(left, Operand(right));
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ cmp(left, instr->hydrogen()->right());
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Handle<Object> nil_value = instr->nil() == kNullValue ?
- factory()->null_value() :
- factory()->undefined_value();
- __ cmp(reg, nil_value);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, equal);
- } else {
- Handle<Object> other_nil_value = instr->nil() == kNullValue ?
- factory()->undefined_value() :
- factory()->null_value();
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ j(equal, true_label);
- __ cmp(reg, other_nil_value);
- __ j(equal, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->temp());
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ test(scratch, Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
- }
-}
-
-
-Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object) {
- __ JumpIfSmi(input, is_not_object);
-
- __ cmp(input, isolate()->factory()->null_value());
- __ j(equal, is_object);
-
- __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(not_zero, is_not_object);
-
- __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(below, is_not_object);
- __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- return below_equal;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
-
- Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
-
- return cond;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond = EmitIsString(reg, temp, false_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Operand input = ToOperand(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ test(input, Immediate(kSmiTagMask));
- EmitBranch(true_block, false_block, zero);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- EmitBranch(true_block, false_block, not_zero);
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = ComputeCompareCondition(op);
- __ test(eax, Operand(eax));
-
- EmitBranch(true_block, false_block, condition);
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- ASSERT(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return equal;
- if (to == LAST_TYPE) return above_equal;
- if (from == FIRST_TYPE) return below_equal;
- UNREACHABLE();
- return equal;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
-
- __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ mov(result, FieldOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ test(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, equal);
-}
-
-
-// Branches to a label or falls through with the answer in the z flag. Trashes
-// the temp registers, but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true,
- Label* is_false,
- Handle<String> class_name,
- Register input,
- Register temp,
- Register temp2) {
- ASSERT(!input.is(temp));
- ASSERT(!input.is(temp2));
- ASSERT(!temp.is(temp2));
- __ JumpIfSmi(input, is_false);
-
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- __ j(equal, is_true);
- __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
- __ j(equal, is_true);
- } else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do an unsigned compare with the width of the type range.
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(above, is_false);
- }
-
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
- // Check if the constructor in the map is a function.
- __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
- // Objects with a non-function constructor have class 'Object'.
- __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
- __ j(not_equal, is_true);
- } else {
- __ j(not_equal, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ mov(temp, FieldOperand(temp,
- SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- __ cmp(temp, class_name);
- // End with the answer in the z flag.
-}
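-
-// The unsigned compare in the non-Function path works because instance
-// types below the subtracted lower bound wrap around to large unsigned
-// values, so a single above check against the width of the
-// NONCALLABLE_SPEC_OBJECT_TYPE range rejects both out-of-range ends at
-// once.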
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
-
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
-
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- // Object and function are in fixed registers defined by the stub.
- ASSERT(ToRegister(instr->context()).is(esi));
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-
- Label true_value, done;
- __ test(eax, Operand(eax));
- __ j(zero, &true_value, Label::kNear);
- __ mov(ToRegister(instr->result()), factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(ToRegister(instr->result()), factory()->true_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
- }
- virtual LInstruction* instr() { return instr_; }
- Label* map_check() { return &map_check_; }
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- // A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurrences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = ToRegister(instr->temp());
- __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- Handle<JSGlobalPropertyCell> cache_cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ cmp(map, Operand::Cell(cache_cell)); // Patched to cached map.
- __ j(not_equal, &cache_miss, Label::kNear);
- __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
- __ jmp(&done);
-
- // The inlined call site cache did not match. Check for null and string
- // before calling the deferred code.
- __ bind(&cache_miss);
- // Null is not an instance of anything.
- __ cmp(object, factory()->null_value());
- __ j(equal, &false_result);
-
- // String values are not instances of anything.
- Condition is_string = masm_->IsObjectStringType(object, temp, temp);
- __ j(is_string, &false_result);
-
- // Go to the deferred code.
- __ jmp(deferred->entry());
-
- __ bind(&false_result);
- __ mov(ToRegister(instr->result()), factory()->false_value());
-
- // Here result holds either the true or the false object. The deferred
- // code also produces a true or false object.
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- PushSafepointRegistersScope scope(this);
-
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(flags);
-
- // Get the temp register reserved by the instruction. This needs to be a
- // register which is pushed last by PushSafepointRegisters as top of the
- // stack is used to pass the offset to the location of the map check to
- // the stub.
- Register temp = ToRegister(instr->temp());
- ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 13;
- int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
- __ mov(temp, Immediate(delta));
- __ StoreToSafepointRegisterSlot(temp, temp);
- CallCodeGeneric(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- // Get the deoptimization index of the LLazyBailout-environment that
- // corresponds to this instruction.
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-
- // Put the result value into the eax slot and restore all registers.
- __ StoreToSafepointRegisterSlot(eax, eax);
-}
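-
-// How the patching handshake above works: map_check labels the cmp against
-// the cached map, and delta measures the code generated from that label to
-// the stub call plus kAdditionalDelta. The delta is passed in the safepoint
-// slot of the register on top of the pushed-registers area, which lets the
-// stub find and patch the inlined map and boolean result recorded at the
-// call site.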
-
-
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceSizeOffset));
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = ComputeCompareCondition(op);
- Label true_value, done;
- __ test(eax, Operand(eax));
- __ j(condition, &true_value, Label::kNear);
- __ mov(ToRegister(instr->result()), factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(ToRegister(instr->result()), factory()->true_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Preserve the return value on the stack and rely on the runtime call
- // to return the value in the same register. We're leaving the code
- // managed by the register allocator and tearing down the frame, so it's
- // safe to write to the context register.
- __ push(eax);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
- ASSERT(NeedsEagerFrame());
- CpuFeatures::Scope scope(SSE2);
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(esp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
- }
- if (dynamic_frame_alignment_) {
- // Fetch the state of the dynamic frame alignment.
- __ mov(edx, Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
- }
- if (NeedsEagerFrame()) {
- __ mov(esp, ebp);
- __ pop(ebp);
- }
- if (dynamic_frame_alignment_) {
- Label no_padding;
- __ cmp(edx, Immediate(kNoAlignmentPadding));
- __ j(equal, &no_padding);
- if (FLAG_debug_code) {
- __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
- Immediate(kAlignmentZapValue));
- __ Assert(equal, "expected alignment marker");
- }
- __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
- __ bind(&no_padding);
- }
- if (info()->IsStub()) {
- __ Ret();
- } else {
- __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
- }
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->global_object()).is(edx));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- __ mov(ecx, instr->name());
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
- RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
-
- // If the cell we are storing to contains the hole, it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
- }
-
- // Store the value.
- __ mov(Operand::Cell(cell_handle), value);
- // Cells are always rescanned, so no write barrier here.
-}
-
-
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->global_object()).is(edx));
- ASSERT(ToRegister(instr->value()).is(eax));
-
- __ mov(ecx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result, ContextOperand(context, instr->slot_index()));
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(result, factory()->the_hole_value());
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- Label is_not_hole;
- __ j(not_equal, &is_not_hole, Label::kNear);
- __ mov(result, factory()->undefined_value());
- __ bind(&is_not_hole);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
-
- Label skip_assignment;
-
- Operand target = ContextOperand(context, instr->slot_index());
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(target, factory()->the_hole_value());
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ j(not_equal, &skip_assignment, Label::kNear);
- }
- }
-
- __ mov(target, value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- Register temp = ToRegister(instr->temp());
- int offset = Context::SlotOffset(instr->slot_index());
- __ RecordWriteContextSlot(context,
- offset,
- value,
- temp,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
- } else {
- __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ mov(result, FieldOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Handle<Map>(current->map()));
- DeoptimizeIf(not_equal, env);
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ mov(result, factory()->undefined_value());
- }
-}
-
-
-void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
- ASSERT(!operand->IsDoubleRegister());
- if (operand->IsConstantOperand()) {
- Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- if (object->IsSmi()) {
- __ Push(Handle<Smi>::cast(object));
- } else {
- __ PushHeapObject(Handle<HeapObject>::cast(object));
- }
- } else if (operand->IsRegister()) {
- __ push(ToRegister(operand));
- } else {
- __ push(ToOperand(operand));
- }
-}
-
-
-// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
-// prototype chain, which causes unbounded code generation.
-static bool CompactEmit(SmallMapList* list,
- Handle<String> name,
- int i,
- Isolate* isolate) {
- Handle<Map> map = list->at(i);
- // If the map has ElementsKind transitions, we will generate map checks
- // for each kind in __ CompareMap(..., ALLOW_ELEMENT_TRANSITION_MAPS).
- if (map->HasElementsTransition()) return false;
- LookupResult lookup(isolate);
- map->LookupDescriptor(NULL, *name, &lookup);
- return lookup.IsField() || lookup.IsConstantFunction();
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- bool all_are_compact = true;
- for (int i = 0; i < map_count; ++i) {
- if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
- all_are_compact = false;
- break;
- }
- }
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
- if (last && !need_generic) {
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- bool compact = all_are_compact ? true :
- CompactEmit(instr->hydrogen()->types(), name, i, isolate());
- __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ mov(ecx, name);
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(edx));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- __ mov(ecx, instr->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register function = ToRegister(instr->function());
- Register temp = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
-
- // Check that the function really is a function.
- __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
- DeoptimizeIf(not_equal, instr->environment());
-
- // Check whether the function has an instance prototype.
- Label non_instance;
- __ test_b(FieldOperand(result, Map::kBitFieldOffset),
- 1 << Map::kHasNonInstancePrototype);
- __ j(not_zero, &non_instance, Label::kNear);
-
- // Get the prototype or initial map from the function.
- __ mov(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr->environment());
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CmpObjectType(result, MAP_TYPE, temp);
- __ j(not_equal, &done, Label::kNear);
-
- // Get the prototype from the initial map.
- __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
- __ jmp(&done, Label::kNear);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in the function's map.
- __ bind(&non_instance);
- __ mov(result, FieldOperand(result, Map::kConstructorOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, ok, fail;
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(equal, &done, Label::kNear);
- __ cmp(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(factory()->fixed_cow_array_map()));
- __ j(equal, &done, Label::kNear);
- Register temp((result.is(eax)) ? ebx : eax);
- __ push(temp);
- __ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
- __ and_(temp, Map::kElementsKindMask);
- __ shr(temp, Map::kElementsKindShift);
- __ cmp(temp, GetInitialFastElementsKind());
- __ j(less, &fail, Label::kNear);
- __ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
- __ j(less_equal, &ok, Label::kNear);
- __ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- __ j(less, &fail, Label::kNear);
- __ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- __ j(less_equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed.");
- __ bind(&ok);
- __ pop(temp);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ mov(result, FieldOperand(input,
- ExternalArray::kExternalPointerOffset));
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Operand index = ToOperand(instr->index());
- Register result = ToRegister(instr->result());
-  // There are two words between the frame pointer and the last argument.
-  // Subtracting the index from length accounts for one of them; the
-  // kPointerSize displacement in the operand below skips the other.
- __ sub(length, index);
- __ mov(result, Operand(arguments, length, times_4, kPointerSize));
-}
-
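The operand built above computes arguments[(length - index) + 1] in word units. A minimal scalar sketch of that arithmetic (names hypothetical, frame layout assumed as described in the comment):

    #include <cstdint>

    // Sketch: the scaled index (length - index) skips one of the two
    // bookkeeping words between the frame pointer and the last argument;
    // the extra +1 word (kPointerSize) skips the other.
    uint32_t LoadArgumentAt(const uint32_t* arguments, int length, int index) {
      return arguments[(length - index) + 1];
    }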
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand() &&
- ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
- elements_kind)) {
- __ SmiUntag(ToRegister(key));
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- elements_kind,
- 0,
- instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
- } else {
- __ fld_s(operand);
- HandleX87FPReturnValue(instr);
- }
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- __ movdbl(ToDoubleRegister(instr->result()), operand);
- } else {
- __ fld_d(operand);
- HandleX87FPReturnValue(instr);
- }
- } else {
- Register result(ToRegister(instr->result()));
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsx_b(result, operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movzx_b(result, operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsx_w(result, operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzx_w(result, operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ mov(result, operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(result, operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ test(result, Operand(result));
- DeoptimizeIf(negative, instr->environment());
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
- if (IsX87TopOfStack(instr->result())) {
-    // Return value is already on the stack. If the value has no uses, then
- // pop it off the FP stack. Otherwise, make sure that there are enough
- // copies of the value on the stack to feed all of the usages, e.g.
- // when the following instruction uses the return value in multiple
- // inputs.
- int count = instr->hydrogen_value()->UseCount();
- if (count == 0) {
- __ fstp(0);
- } else {
- count--;
- ASSERT(count <= 7);
- while (count-- > 0) {
- __ fld(0);
- }
- }
- } else {
- __ fstp_d(ToOperand(instr->result()));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(), instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
- __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
- }
-
- Operand double_load_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister result = ToDoubleRegister(instr->result());
- __ movdbl(result, double_load_operand);
- } else {
- __ fld_d(double_load_operand);
- HandleX87FPReturnValue(instr);
- }
-}
-
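The hole check above needs only one 32-bit compare because the hole is encoded as a NaN with a fixed high word. A hedged sketch of the test; the actual kHoleNanUpper32 value is defined by the runtime, and the constant below is illustrative only:

    #include <cstdint>
    #include <cstring>

    // Sketch: detect the hole by its upper 32 bits, exactly as the cmp
    // against kHoleNanUpper32 above does.
    bool IsHoleNan(double value) {
      const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // illustrative value
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }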
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register result = ToRegister(instr->result());
-
- // Load the result.
- __ mov(result,
- BuildFastArrayOperand(instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr->environment());
- } else {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-Operand LCodeGen::BuildFastArrayOperand(
- LOperand* elements_pointer,
- LOperand* key,
- Representation key_representation,
- ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
- Register elements_pointer_reg = ToRegister(elements_pointer);
- int shift_size = ElementsKindToShiftSize(elements_kind);
- if (key->IsConstantOperand()) {
- int constant_value = ToInteger32(LConstantOperand::cast(key));
- if (constant_value & 0xF0000000) {
- Abort("array index constant value too big");
- }
- return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
- } else {
- // Take the tag bit into account while computing the shift size.
- if (key_representation.IsTagged() && (shift_size >= 1)) {
- shift_size -= kSmiTagSize;
- }
- ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(elements_pointer_reg,
- ToRegister(key),
- scale_factor,
- offset + (additional_index << shift_size));
- }
-}
-
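For a constant key the whole address folds into a single displacement; for a register key the same product is produced by the x86 scaled-index addressing mode, with one shift bit absorbed by the smi tag when the key is still tagged. A sketch of the constant-key displacement:

    #include <cstdint>

    // Sketch of the displacement computed in the constant-key branch above.
    // shift_size is log2 of the element size for the elements kind.
    uint32_t ConstantKeyDisplacement(uint32_t constant_key,
                                     uint32_t additional_index,
                                     int shift_size,
                                     uint32_t offset) {
      return ((constant_key + additional_index) << shift_size) + offset;
    }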
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(edx));
- ASSERT(ToRegister(instr->key()).is(ecx));
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(esp, -2 * kPointerSize));
- } else {
- // Check for arguments adapter frame.
- Label done, adapted;
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(result),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adapted, Label::kNear);
-
- // No arguments adaptor frame.
- __ mov(result, Operand(ebp));
- __ jmp(&done, Label::kNear);
-
- // Arguments adaptor frame present.
- __ bind(&adapted);
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
-    // The result is this frame's frame pointer, or, if an adaptor frame is
-    // present, the frame pointer of the real frame below it.
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Operand elem = ToOperand(instr->elements());
- Register result = ToRegister(instr->result());
-
- Label done;
-
- // If no arguments adaptor frame the number of arguments is fixed.
- __ cmp(ebp, elem);
- __ mov(result, Immediate(scope()->num_parameters()));
- __ j(equal, &done, Label::kNear);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(result, Operand(result,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(result);
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register scratch = ToRegister(instr->temp());
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
-
- // Do not transform the receiver to object for strict mode
- // functions.
- __ mov(scratch,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &receiver_ok); // A near jump is not sufficient here!
-
- // Do not transform the receiver to object for builtins.
- __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &receiver_ok);
-
- // Normal function. Replace undefined or null with global receiver.
- __ cmp(receiver, factory()->null_value());
- __ j(equal, &global_object, Label::kNear);
- __ cmp(receiver, factory()->undefined_value());
- __ j(equal, &global_object, Label::kNear);
-
- // The receiver should be a JS object.
- __ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr->environment());
- __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
- DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok, Label::kNear);
-
- __ bind(&global_object);
- // TODO(kmillikin): We have a hydrogen value for the global object. See
- // if it's better to use it than to explicitly fetch it from the context
- // here.
- __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
- __ mov(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
- __ bind(&receiver_ok);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- ASSERT(receiver.is(eax)); // Used for parameter count.
- ASSERT(function.is(edi)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(eax));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr->environment());
-
- __ push(receiver);
- __ mov(receiver, length);
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ test(length, Operand(length));
- __ j(zero, &invoke, Label::kNear);
- __ bind(&loop);
- __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
- __ dec(length);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(eax);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- EmitPushTaggedOperand(argument);
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- Register result = ToRegister(instr->result());
- if (info()->IsOptimizing()) {
- __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
- } else {
- // If there is no frame, the context must be in esi.
- ASSERT(result.is(esi));
- }
-}
-
-
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result,
- Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- __ push(esi); // The context is the first argument.
- __ push(Immediate(instr->hydrogen()->pairs()));
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ mov(result,
- Operand(context,
- Context::SlotOffset(instr->qml_global()
- ? Context::QML_GLOBAL_OBJECT_INDEX
- : Context::GLOBAL_OBJECT_INDEX)));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- EDIState edi_state) {
- bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
- function->shared()->formal_parameter_count() == arity;
-
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- if (can_invoke_directly) {
- if (edi_state == EDI_UNINITIALIZED) {
- __ LoadHeapObject(edi, function);
- }
-
- // Change context.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
-    // Set eax to the arguments count if adaptation is not needed. Assumes
-    // that eax is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ mov(eax, arity);
- }
-
- // Invoke function directly.
- __ SetCallKind(ecx, call_kind);
- if (*function == *info()->closure()) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
- }
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- } else {
- // We need to adapt arguments.
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
- __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
- }
-}
-
-
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(eax));
- CallKnownFunction(instr->function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- EDI_UNINITIALIZED);
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->value());
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr->environment());
-
- Label done;
- Register tmp = input_reg.is(eax) ? ecx : eax;
- Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label negative;
- __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| will be restored
- // unchanged by popping safepoint registers.
- __ test(tmp, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative);
- __ jmp(&done);
-
- __ bind(&negative);
-
- Label allocated, slow;
- __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
- __ jmp(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
- instr, instr->context());
-
- // Set the pointer to the new heap number in tmp.
- if (!tmp.is(eax)) __ mov(tmp, eax);
-
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
-
- __ bind(&allocated);
- __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ and_(tmp2, ~HeapNumber::kSignMask);
- __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
- __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
- __ StoreToSafepointRegisterSlot(input_reg, tmp);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->value());
- __ test(input_reg, Operand(input_reg));
- Label is_positive;
- __ j(not_sign, &is_positive);
- __ neg(input_reg);
- __ test(input_reg, Operand(input_reg));
- DeoptimizeIf(negative, instr->environment());
- __ bind(&is_positive);
-}
-
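The deopt after neg catches exactly one input: INT_MIN, whose absolute value is not representable in 32 bits, so neg leaves it negative. A scalar sketch (the wrap is done in unsigned arithmetic to keep the C++ well defined):

    #include <cstdint>

    // Sketch of the integer abs above; *deopt mirrors DeoptimizeIf(negative).
    int32_t IntAbs(int32_t v, bool* deopt) {
      if (v < 0) {
        // Two's-complement negate, like the neg instruction; INT_MIN
        // wraps back to itself.
        v = static_cast<int32_t>(0u - static_cast<uint32_t>(v));
      }
      *deopt = (v < 0);  // only INT_MIN is still negative here
      return v;
    }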
-
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LUnaryMathOperation* instr_;
- };
-
- ASSERT(instr->value()->Equals(instr->result()));
- Representation r = instr->hydrogen()->value()->representation();
-
- CpuFeatures::Scope scope(SSE2);
- if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ xorps(scratch, scratch);
- __ subsd(scratch, input_reg);
- __ pand(input_reg, scratch);
- } else if (r.IsInteger32()) {
- EmitIntegerMathAbs(instr);
- } else { // Tagged case.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input_reg = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
- EmitIntegerMathAbs(instr);
- __ bind(deferred->exit());
- }
-}
-
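The xorps/subsd/pand sequence in the double case is a branch-free sign clear: 0.0 - x differs from a non-NaN x only in the sign bit, so ANDing the two bit patterns zeroes the sign and keeps everything else. A sketch of the same computation on scalar bits:

    #include <cstdint>
    #include <cstring>

    // Sketch of the SSE2 abs above (xorps: zero, subsd: negate, pand: and).
    double BitwiseAbs(double x) {
      double neg = 0.0 - x;  // flips only the sign bit for non-NaN x
      uint64_t a, b;
      std::memcpy(&a, &x, sizeof(a));
      std::memcpy(&b, &neg, sizeof(b));
      uint64_t r = a & b;    // sign & ~sign == 0; other bits unchanged
      double result;
      std::memcpy(&result, &r, sizeof(result));
      return result;
    }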
-
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister xmm_scratch = xmm0;
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
-
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope(SSE4_1);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Deoptimize on negative zero.
- Label non_zero;
- __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- __ j(not_equal, &non_zero, Label::kNear);
- __ movmskpd(output_reg, input_reg);
- __ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&non_zero);
- }
- __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
- __ cvttsd2si(output_reg, Operand(xmm_scratch));
- // Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- DeoptimizeIf(equal, instr->environment());
- } else {
- Label negative_sign, done;
- // Deoptimize on unordered.
- __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr->environment());
- __ j(below, &negative_sign, Label::kNear);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Check for negative zero.
- Label positive_sign;
- __ j(above, &positive_sign, Label::kNear);
- __ movmskpd(output_reg, input_reg);
- __ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ Set(output_reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- __ bind(&positive_sign);
- }
-
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, Operand(input_reg));
- // Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- DeoptimizeIf(equal, instr->environment());
- __ jmp(&done, Label::kNear);
-
-    // Non-zero negative values reach here.
- __ bind(&negative_sign);
- // Truncate, then compare and compensate.
- __ cvttsd2si(output_reg, Operand(input_reg));
- __ cvtsi2sd(xmm_scratch, output_reg);
- __ ucomisd(input_reg, xmm_scratch);
- __ j(equal, &done, Label::kNear);
- __ sub(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
-
- __ bind(&done);
- }
-}
-
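Without SSE4.1's roundsd, floor is synthesized from truncation: cvttsd2si already equals floor for non-negative input, and for negative input the truncated value is converted back and decremented when the truncation rounded up. A scalar sketch with the deopt paths (NaN, -0, overflow) elided:

    #include <cstdint>

    // Sketch of the truncate-then-compensate floor in the non-SSE4.1 path.
    int32_t FloorViaTruncate(double x) {
      int32_t t = static_cast<int32_t>(x);  // cvttsd2si: round toward zero
      if (x >= 0) return t;                 // truncation == floor here
      // Negative: truncation rounded toward zero, i.e. up; compensate
      // unless the value was already integral.
      return (static_cast<double>(t) == x) ? t : t - 1;
    }
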
-void LCodeGen::DoMathRound(LMathRound* instr) {
- CpuFeatures::Scope scope(SSE2);
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = xmm0;
- XMMRegister input_temp = ToDoubleRegister(instr->temp());
- ExternalReference one_half = ExternalReference::address_of_one_half();
- ExternalReference minus_one_half =
- ExternalReference::address_of_minus_one_half();
-
- Label done, round_to_zero, below_one_half, do_not_compensate;
- __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
- __ ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_one_half);
-
-  // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
- __ addsd(xmm_scratch, input_reg);
- __ cvttsd2si(output_reg, Operand(xmm_scratch));
- // Overflow is signalled with minint.
- __ cmp(output_reg, 0x80000000u);
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
- __ jmp(&done);
-
- __ bind(&below_one_half);
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
- __ ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &round_to_zero);
-
-  // CVTTSD2SI rounds towards zero, so we get ceil(x - (-0.5)) here and
-  // then compare and compensate.
- __ movsd(input_temp, input_reg); // Do not alter input_reg.
- __ subsd(input_temp, xmm_scratch);
- __ cvttsd2si(output_reg, Operand(input_temp));
- // Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmp(output_reg, 0x80000000u);
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
-
- __ cvtsi2sd(xmm_scratch, output_reg);
- __ ucomisd(xmm_scratch, input_temp);
- __ j(equal, &done);
- __ sub(output_reg, Immediate(1));
- // No overflow because we already ruled out minint.
- __ jmp(&done);
-
- __ bind(&round_to_zero);
- // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
- // we can ignore the difference between a result of -0 and +0.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // If the sign is positive, we return +0.
- __ movmskpd(output_reg, input_reg);
- __ test(output_reg, Immediate(1));
- __ RecordComment("Minus zero");
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ Set(output_reg, Immediate(0));
- __ bind(&done);
-}
-
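The three-way split above implements Math.round(x) == floor(x + 0.5) with no floor instruction: for x >= 0.5 the truncation of x + 0.5 is already the floor, the band [-0.5, 0.5) returns 0 (with a minus-zero deopt if required), and for x <= -0.5 the truncation of x + 0.5 is compensated downward when inexact. A scalar sketch:

    #include <cstdint>

    // Sketch of the rounding above; deopts (overflow, minus zero) elided.
    int32_t RoundHalfUp(double x) {
      if (x >= 0.5) {
        return static_cast<int32_t>(x + 0.5);  // x + 0.5 >= 1: trunc == floor
      }
      if (x > -0.5) return 0;                  // the round_to_zero band
      double shifted = x + 0.5;                // x <= -0.5, so shifted <= 0
      int32_t t = static_cast<int32_t>(shifted);
      // Truncation rounded up for negative values; compensate when inexact.
      return (static_cast<double>(t) == shifted) ? t : t - 1;
    }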
-
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ sqrtsd(input_reg, input_reg);
-}
-
-
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister xmm_scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- Register scratch = ToRegister(instr->temp());
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label done, sqrt;
- // Check base for -Infinity. According to IEEE-754, single-precision
- // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
- __ mov(scratch, 0xFF800000);
- __ movd(xmm_scratch, scratch);
- __ cvtss2sd(xmm_scratch, xmm_scratch);
- __ ucomisd(input_reg, xmm_scratch);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &sqrt, Label::kNear);
- __ j(carry, &sqrt, Label::kNear);
- // If input is -Infinity, return Infinity.
- __ xorps(input_reg, input_reg);
- __ subsd(input_reg, xmm_scratch);
- __ jmp(&done, Label::kNear);
-
- // Square root.
- __ bind(&sqrt);
- __ xorps(xmm_scratch, xmm_scratch);
- __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
- __ sqrtsd(input_reg, input_reg);
- __ bind(&done);
-}
-
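A scalar equivalent of the two special cases handled above; per ES5 15.8.2.13, pow(-Infinity, 0.5) is +Infinity even though sqrt(-Infinity) is NaN, and adding +0 first maps a -0 base to +0:

    #include <cmath>

    // Sketch of Math.pow(x, 0.5) as generated above.
    double PowHalf(double x) {
      if (x == -INFINITY) return INFINITY;  // explicit -Infinity check
      return std::sqrt(x + 0.0);            // addsd +0: turns -0 into +0
    }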
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
- ASSERT(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(xmm1));
- ASSERT(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(eax));
- ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
- ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
-
- if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(eax, &no_deopt);
- __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-
-void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- CpuFeatures::Scope scope(SSE2);
-  // Having marked this instruction as a call, we can use any
-  // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- ASSERT(ToRegister(instr->global_object()).is(eax));
- // Assert that the register size is indeed the size of each seed.
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == kSeedSize);
-
- __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
- // ebx: FixedArray of the native context's random seeds
-
- // Load state[0].
- __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
- // If state[0] == 0, call runtime to initialize seeds.
- __ test(ecx, ecx);
- __ j(zero, deferred->entry());
- // Load state[1].
- __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
- // ecx: state[0]
- // eax: state[1]
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- __ movzx_w(edx, ecx);
- __ imul(edx, edx, 18273);
- __ shr(ecx, 16);
- __ add(ecx, edx);
- // Save state[0].
- __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzx_w(edx, eax);
- __ imul(edx, edx, 36969);
- __ shr(eax, 16);
- __ add(eax, edx);
- // Save state[1].
- __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ shl(ecx, 14);
- __ and_(eax, Immediate(0x3FFFF));
- __ add(eax, ecx);
-
- __ bind(deferred->exit());
-  // Convert 32 random bits in eax to 0.(32 random bits) in a double
-  // by computing:
-  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
- __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm2, ebx);
- __ movd(xmm1, eax);
- __ cvtss2sd(xmm2, xmm2);
- __ xorps(xmm1, xmm2);
- __ subsd(xmm1, xmm2);
-}
-
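The generator above is a pair of 16-bit multiply-with-carry steps, and the tail converts the 32 random bits to a double in [0, 1) with one xor and one subtraction. A self-contained sketch of both parts:

    #include <cstdint>
    #include <cstring>

    // Sketch of the MWC generator and the bits-to-double trick above:
    // planting 32 random bits in the low mantissa of 1.0 x 2^20 and
    // subtracting 2^20 leaves bits * 2^-32, a double in [0, 1).
    double RandomSketch(uint32_t state[2]) {
      state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
      state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
      uint32_t bits = (state[0] << 14) + (state[1] & 0x3FFFF);

      const double two_pow_20 = 1048576.0;  // 1.0 x 2^20
      uint64_t rep;
      std::memcpy(&rep, &two_pow_20, sizeof(rep));
      rep ^= bits;                          // xorps: fill low mantissa bits
      double d;
      std::memcpy(&d, &rep, sizeof(d));
      return d - two_pow_20;                // subsd
    }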
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- // Return value is in eax.
-}
-
-
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- CpuFeatures::Scope scope(SSE2);
- ASSERT(instr->value()->Equals(instr->result()));
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- Label positive, done, zero;
- __ xorps(xmm0, xmm0);
- __ ucomisd(input_reg, xmm0);
- __ j(above, &positive, Label::kNear);
- __ j(equal, &zero, Label::kNear);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(input_reg, Operand::StaticVariable(nan));
- __ jmp(&done, Label::kNear);
- __ bind(&zero);
- __ push(Immediate(0xFFF00000));
- __ push(Immediate(0));
- __ movdbl(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ jmp(&done, Label::kNear);
- __ bind(&positive);
- __ fldln2();
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
- __ fld_d(Operand(esp, 0));
- __ fyl2x();
- __ fstp_d(Operand(esp, 0));
- __ movdbl(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ bind(&done);
-}
-
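The positive branch uses the x87 identity ln(x) = ln(2) * log2(x): fldln2 pushes ln 2, and fyl2x multiplies it by log2 of the operand. Zero and negative inputs get the -Infinity bit pattern (0xFFF00000:00000000, built on the stack above) and the canonical NaN respectively. A scalar sketch:

    #include <cmath>
    #include <limits>

    // Sketch of the three cases above.
    double MathLogSketch(double x) {
      if (x > 0) return std::log(2.0) * std::log2(x);  // fldln2 + fyl2x
      if (x == 0) return -std::numeric_limits<double>::infinity();
      return std::numeric_limits<double>::quiet_NaN();  // canonical NaN
    }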
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister input = ToDoubleRegister(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
-}
-
-
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->function()).is(edi));
- ASSERT(instr->HasPointerMap());
-
- if (instr->known_function().is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- } else {
- CallKnownFunction(instr->known_function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- EDI_CONTAINS_TARGET);
- }
-}
-
-
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->key()).is(ecx));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(ecx, instr->name());
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->function()).is(edi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ mov(ecx, instr->name());
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(eax));
- CallKnownFunction(instr->target(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- EDI_UNINITIALIZED);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->constructor()).is(edi));
- ASSERT(ToRegister(instr->result()).is(eax));
-
- if (FLAG_optimize_constructed_arrays) {
-    // No cell in ebx for construct type feedback in optimized code.
- Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
- isolate());
- __ mov(ebx, Immediate(undefined_value));
- }
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ Set(eax, Immediate(instr->arity()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->constructor()).is(edi));
- ASSERT(ToRegister(instr->result()).is(eax));
- ASSERT(FLAG_optimize_constructed_arrays);
-
- __ mov(ebx, instr->hydrogen()->property_cell());
- Handle<Code> array_construct_code =
- isolate()->builtins()->ArrayConstructCode();
- __ Set(eax, Immediate(instr->arity()));
- CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- int offset = instr->offset();
-
- if (!instr->transition().is_null()) {
- if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
- } else {
- Register temp = ToRegister(instr->temp());
- Register temp_map = ToRegister(instr->temp_map());
- __ mov(temp_map, instr->transition());
- __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
- // Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- temp_map,
- temp,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
- }
-
- // Do the store.
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
- __ mov(FieldOperand(object, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object,
- offset,
- value,
- temp,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- } else {
- Register temp = ToRegister(instr->temp());
- __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(temp, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWriteField(temp,
- offset,
- value,
- object,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- }
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(edx));
- ASSERT(ToRegister(instr->value()).is(eax));
-
- __ mov(ecx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
-
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
- __ cmp(ToOperand(instr->length()),
- Immediate(Smi::FromInt(constant_index)));
- } else {
- __ cmp(ToOperand(instr->length()), Immediate(constant_index));
- }
- DeoptimizeIf(below_equal, instr->environment());
- } else {
- __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
- DeoptimizeIf(above_equal, instr->environment());
- }
-}
-
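The register case above needs only one unsigned compare: reinterpreted as unsigned, a negative index becomes huge and fails index < length too, so the above_equal deopt rejects both out-of-range and negative indices. A sketch:

    #include <cstdint>

    // Sketch of the single-compare bounds check (the above_equal deopt).
    bool InBounds(int32_t index, int32_t length) {
      return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
    }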
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand() &&
- ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
- elements_kind)) {
- __ SmiUntag(ToRegister(key));
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- elements_kind,
- 0,
- instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- CpuFeatures::Scope scope(SSE2);
- __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatures::Scope scope(SSE2);
- __ movdbl(operand, ToDoubleRegister(instr->value()));
- } else {
- Register value = ToRegister(instr->value());
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- __ mov_b(operand, value);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ mov_w(operand, value);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(operand, value);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister value = ToDoubleRegister(instr->value());
-
- if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
-    __ j(parity_odd, &have_value);  // Ordered, i.e. not NaN.
-
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
- __ bind(&have_value);
- }
-
- Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ movdbl(double_store_operand, value);
-}
-
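Canonicalization exists so that an arbitrary NaN payload stored into a FixedDoubleArray can never collide with the hole encoding. A hedged sketch; the canonical pattern below is illustrative, while the real one comes from address_of_canonical_non_hole_nan:

    #include <cstdint>
    #include <cstring>

    // Sketch: replace any NaN (value != value) with one fixed quiet NaN
    // before storing, mirroring the ucomisd + movdbl sequence above.
    double CanonicalizeNan(double value) {
      if (value == value) return value;                   // ordered: not NaN
      const uint64_t kCanonical = 0x7FF8000000000000ULL;  // illustrative
      double result;
      std::memcpy(&result, &kCanonical, sizeof(result));
      return result;
    }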
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
-
- Operand operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ mov(operand, value);
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- ASSERT(!instr->key()->IsConstantOperand());
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ lea(key, operand);
- __ RecordWrite(elements,
- key,
- value,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
-  // Dispatch by elements kind: external, fast double, or fast (tagged).
- if (instr->is_external()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- ASSERT(ToRegister(instr->object()).is(edx));
- ASSERT(ToRegister(instr->key()).is(ecx));
- ASSERT(ToRegister(instr->value()).is(eax));
-
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationSiteInfo(object, temp);
- DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- bool is_simple_map_transition =
- IsSimpleMapChangeTransition(from_kind, to_kind);
- Label::Distance branch_distance =
- is_simple_map_transition ? Label::kNear : Label::kFar;
- __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
- __ j(not_equal, &not_applicable, branch_distance);
- if (is_simple_map_transition) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- Handle<Map> map = instr->hydrogen()->transitioned_map();
- __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
- Immediate(map));
- // Write barrier.
- ASSERT_NE(instr->temp(), NULL);
- __ RecordWriteForMap(object_reg, to_map, new_map_reg,
- ToRegister(instr->temp()),
- kDontSaveFPRegs);
- } else if (FLAG_compiled_transitions) {
- PushSafepointRegistersScope scope(this);
- if (!object_reg.is(eax)) {
- __ push(object_reg);
- }
- LoadContextFromDeferred(instr->context());
- if (!object_reg.is(eax)) {
- __ pop(eax);
- }
- __ mov(ebx, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ mov(new_map_reg, to_map);
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(edx));
- ASSERT(new_map_reg.is(ebx));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ mov(new_map_reg, to_map);
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(edx));
- ASSERT(new_map_reg.is(ebx));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
- } else {
- UNREACHABLE();
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(masm(),
- factory(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ push(Immediate(Smi::FromInt(const_index)));
- } else {
- Register index = ToRegister(instr->index());
- __ SmiTag(index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
- instr, instr->context());
- __ AssertSmi(eax);
- __ SmiUntag(eax);
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- ASSERT(!char_code.is(result));
-
- __ cmp(char_code, String::kMaxOneByteCharCode);
- __ j(above, deferred->entry());
- __ Set(result, Immediate(factory()->single_character_string_cache()));
- __ mov(result, FieldOperand(result,
- char_code, times_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result, factory()->undefined_value());
- __ j(equal, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ SmiTag(char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ mov(result, FieldOperand(string, String::kLengthOffset));
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- EmitPushTaggedOperand(instr->left());
- EmitPushTaggedOperand(instr->right());
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatures::Scope scope(SSE2);
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- LOperand* temp = instr->temp();
-
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- class DeferredNumberTagI: public LDeferredCode {
- public:
- DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagI* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
- __ SmiTag(reg);
- __ j(overflow, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagU* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ cmp(reg, Immediate(Smi::kMaxValue));
- __ j(above, deferred->entry());
- __ SmiTag(reg);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
- Register reg = ToRegister(value);
- Register tmp = reg.is(eax) ? ecx : eax;
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label done;
-
- if (signedness == SIGNED_INT32) {
- // There was overflow, so bits 30 and 31 of the original integer
- // disagree. Try to allocate a heap number in new space and store
- // the value in there. If that fails, call the runtime system.
- __ SmiUntag(reg);
- __ xor_(reg, 0x80000000);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope feature_scope(SSE2);
- __ cvtsi2sd(xmm0, Operand(reg));
- } else {
- __ push(reg);
- __ fild_s(Operand(esp, 0));
- __ pop(reg);
- }
- } else {
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope feature_scope(SSE2);
- __ LoadUint32(xmm0, reg, xmm1);
- } else {
- // There's no fild variant for unsigned values, so zero-extend to a 64-bit
- // int manually.
- __ push(Immediate(0));
- __ push(reg);
- __ fild_d(Operand(esp, 0));
- __ pop(reg);
- __ pop(reg);
- }
- }
-
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
- __ jmp(&done, Label::kNear);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- if (!reg.is(eax)) __ mov(reg, eax);
-
- // Done. Store the value in xmm0 into the value field of the allocated
- // heap number.
- __ bind(&done);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope feature_scope(SSE2);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
- } else {
- __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
- }
- __ StoreToSafepointRegisterSlot(reg, reg);
-}
-
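The SIGNED_INT32 branch above exploits an identity: SmiTag is a left shift
by one, so after an overflow, untagging and flipping bit 31 recovers the
original value, because bits 30 and 31 of that value disagreed. A sketch:

#include <cstdint>

// Recover the original int32 after a smi tag (value << 1) overflowed.
int32_t RecoverAfterSmiTagOverflow(int32_t tagged) {
  int32_t untagged = tagged >> 1;  // SmiUntag: arithmetic shift right.
  // Untagging duplicated bit 30 into bit 31; the original bits 30 and 31
  // disagreed, so flipping bit 31 restores the value (the xor above).
  return untagged ^ static_cast<int32_t>(0x80000000u);
}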
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagD* instr_;
- };
-
- Register reg = ToRegister(instr->result());
-
- bool convert_hole = false;
- HValue* change_input = instr->hydrogen()->value();
- if (change_input->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(change_input);
- convert_hole = load->UsesMustHandleHole();
- }
-
- Label no_special_nan_handling;
- Label done;
- if (convert_hole) {
- bool use_sse2 = CpuFeatures::IsSupported(SSE2);
- if (use_sse2) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ ucomisd(input_reg, input_reg);
- } else {
- if (!IsX87TopOfStack(instr->value())) {
- __ fld_d(ToOperand(instr->value()));
- }
- __ fld(0);
- __ fld(0);
- __ FCmp();
- }
-
- __ j(parity_odd, &no_special_nan_handling);
- __ sub(esp, Immediate(kDoubleSize));
- if (use_sse2) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(MemOperand(esp, 0), input_reg);
- } else {
- __ fld(0);
- __ fstp_d(MemOperand(esp, 0));
- }
- __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
- Immediate(kHoleNanUpper32));
- Label canonicalize;
- __ j(not_equal, &canonicalize);
- __ add(esp, Immediate(kDoubleSize));
- __ mov(reg, factory()->the_hole_value());
- __ jmp(&done);
- __ bind(&canonicalize);
- __ add(esp, Immediate(kDoubleSize));
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- if (use_sse2) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(input_reg, Operand::StaticVariable(nan));
- } else {
- __ fstp(0);
- __ fld_d(Operand::StaticVariable(nan));
- }
- }
-
- __ bind(&no_special_nan_handling);
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- Register tmp = ToRegister(instr->temp());
- __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
- } else {
- if (!IsX87TopOfStack(instr->value())) {
- __ fld_d(ToOperand(instr->value()));
- }
- __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ Set(reg, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(reg, eax);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(input));
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- if (instr->needs_check()) {
- __ test(ToRegister(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- __ AssertSmi(ToRegister(input));
- }
- __ SmiUntag(ToRegister(input));
-}
-
-
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- Register temp_reg,
- XMMRegister result_reg,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode) {
- Label load_smi, done;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
-
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- if (deoptimize_on_undefined) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number;
- __ j(equal, &heap_number, Label::kNear);
-
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, env);
-
- // Convert undefined to NaN.
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(nan));
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
- }
- // Heap number to XMM conversion.
- __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(result_reg, xmm_scratch);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(temp_reg, result_reg);
- __ test_b(temp_reg, 1);
- DeoptimizeIf(not_zero, env);
- }
- __ jmp(&done, Label::kNear);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
- __ test(input_reg, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, env);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(zero, &load_smi);
- ExternalReference hole_nan_reference =
- ExternalReference::address_of_the_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(hole_nan_reference));
- __ jmp(&done, Label::kNear);
- } else {
- ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
- }
-
- // Smi to XMM conversion
- __ bind(&load_smi);
- __ SmiUntag(input_reg); // Untag smi before converting to float.
- __ cvtsi2sd(result_reg, Operand(input_reg));
- __ SmiTag(input_reg); // Retag smi.
- __ bind(&done);
-}
-
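The smi fast path at the end of EmitNumberUntagD converts in place and
re-tags afterwards so the input register is preserved. Numerically the
conversion is simply:

#include <cstdint>

// ia32 smis carry a one-bit tag; dropping it yields the int32 payload.
double SmiToDouble(int32_t tagged_smi) {
  return static_cast<double>(tagged_smi >> 1);  // Untag, then widen.
}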
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Label done, heap_number;
- Register input_reg = ToRegister(instr->value());
-
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
-
- if (instr->truncating()) {
- __ j(equal, &heap_number, Label::kNear);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- __ RecordComment("Deferred TaggedToI: cannot truncate");
- DeoptimizeIf(not_equal, instr->environment());
- __ mov(input_reg, 0);
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
- if (CpuFeatures::IsSupported(SSE3)) {
- CpuFeatures::Scope scope(SSE3);
- Label convert;
- // Use more powerful conversion when sse3 is available.
- // Load x87 register with heap number.
- __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
- // Get exponent alone and check for too-big exponent.
- __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- __ and_(input_reg, HeapNumber::kExponentMask);
- const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
- __ j(less, &convert, Label::kNear);
- // Pop FPU stack before deoptimizing.
- __ fstp(0);
- __ RecordComment("Deferred TaggedToI: exponent too big");
- DeoptimizeIf(no_condition, instr->environment());
-
- // Reserve space for 64 bit answer.
- __ bind(&convert);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- // Do conversion, which cannot fail because we checked the exponent.
- __ fisttp_d(Operand(esp, 0));
- __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
- __ add(Operand(esp), Immediate(kDoubleSize));
- } else {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ cmp(input_reg, 0x80000000u);
- __ j(not_equal, &done);
- // Check if the input was 0x80000000 (kMinInt).
- // If not, the conversion overflowed and we deoptimize.
- ExternalReference min_int = ExternalReference::address_of_min_int();
- __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
- __ ucomisd(xmm_temp, xmm0);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- }
- } else if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- // Deoptimize if we don't have a heap number.
- __ RecordComment("Deferred TaggedToI: not a heap number");
- DeoptimizeIf(not_equal, instr->environment());
-
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, Operand(xmm0));
- __ cvtsi2sd(xmm_temp, Operand(input_reg));
- __ ucomisd(xmm0, xmm_temp);
- __ RecordComment("Deferred TaggedToI: lost precision");
- DeoptimizeIf(not_equal, instr->environment());
- __ RecordComment("Deferred TaggedToI: NaN");
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(input_reg, Operand(input_reg));
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ and_(input_reg, 1);
- __ RecordComment("Deferred TaggedToI: minus zero");
- DeoptimizeIf(not_zero, instr->environment());
- }
- } else {
- UNREACHABLE();
- }
- __ bind(&done);
-}
-
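The (kExponentBias + 63) cutoff in the SSE3 path above reflects that fisttp
produces a 64-bit integer: a double with an unbiased exponent of 63 or more
(magnitude >= 2^63) cannot fit, and NaN/Infinity carry an all-ones exponent
and fail the same comparison. A sketch of the predicate being tested:

#include <cstdint>
#include <cstring>

// True iff fisttp on this value cannot overflow a 64-bit result.
bool FitsInt64ViaFisttp(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  int unbiased = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  return unbiased < 63;  // Mirrors cmp(..., kTooBigExponent); j(less, ...).
}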
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
-
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
-
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
-
- // Smi to int32 conversion
- __ SmiUntag(input_reg); // Untag smi.
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* temp = instr->temp();
- ASSERT(temp == NULL || temp->IsRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsDoubleRegister());
-
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope scope(SSE2);
- Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToDoubleRegister(result);
-
- bool deoptimize_on_minus_zero =
- instr->hydrogen()->deoptimize_on_minus_zero();
- Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
-
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
- HValue* value = instr->hydrogen()->value();
- if (value->type().IsSmi()) {
- if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
- }
- }
- }
-
- EmitNumberUntagD(input_reg,
- temp_reg,
- result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
- deoptimize_on_minus_zero,
- instr->environment(),
- mode);
- } else {
- UNIMPLEMENTED();
- }
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsDoubleRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsRegister());
- CpuFeatures::Scope scope(SSE2);
-
- XMMRegister input_reg = ToDoubleRegister(input);
- Register result_reg = ToRegister(result);
-
- if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- __ cvttsd2si(result_reg, Operand(input_reg));
- __ cmp(result_reg, 0x80000000u);
- if (CpuFeatures::IsSupported(SSE3)) {
- // This will deoptimize if the exponent of the input is out of range.
- CpuFeatures::Scope scope(SSE3);
- Label convert, done;
- __ j(not_equal, &done, Label::kNear);
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
- // Get exponent alone and check for too-big exponent.
- __ mov(result_reg, Operand(esp, sizeof(int32_t)));
- __ and_(result_reg, HeapNumber::kExponentMask);
- const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
- __ j(less, &convert, Label::kNear);
- __ add(Operand(esp), Immediate(kDoubleSize));
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&convert);
- // Do conversion, which cannot fail because we checked the exponent.
- __ fld_d(Operand(esp, 0));
- __ fisttp_d(Operand(esp, 0));
- __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ bind(&done);
- } else {
- Label done;
- Register temp_reg = ToRegister(instr->temp());
- XMMRegister xmm_scratch = xmm0;
-
- // If cvttsd2si succeeded, we're done. Otherwise, we attempt
- // manual conversion.
- __ j(not_equal, &done, Label::kNear);
-
- // Get high 32 bits of the input in result_reg and temp_reg.
- __ pshufd(xmm_scratch, input_reg, 1);
- __ movd(Operand(temp_reg), xmm_scratch);
- __ mov(result_reg, temp_reg);
-
- // Prepare negation mask in temp_reg.
- __ sar(temp_reg, kBitsPerInt - 1);
-
- // Extract the exponent from result_reg and subtract adjusted
- // bias from it. The adjustment is selected in a way such that
- // when the difference is zero, the answer is in the low 32 bits
- // of the input, otherwise a shift has to be performed.
- __ shr(result_reg, HeapNumber::kExponentShift);
- __ and_(result_reg,
- HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
- __ sub(Operand(result_reg),
- Immediate(HeapNumber::kExponentBias +
- HeapNumber::kExponentBits +
- HeapNumber::kMantissaBits));
- // Don't handle big (> kMantissaBits + kExponentBits == 63) or
- // special exponents.
- DeoptimizeIf(greater, instr->environment());
-
- // Zero out the sign and the exponent in the input (by shifting
- // it to the left) and restore the implicit mantissa bit,
- // i.e. convert the input to unsigned int64 shifted left by
- // kExponentBits.
- ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
- // Minus zero has the most significant bit set and the other
- // bits cleared.
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
- __ psllq(input_reg, HeapNumber::kExponentBits);
- __ por(input_reg, xmm_scratch);
-
- // Get the amount to shift the input right in xmm_scratch.
- __ neg(result_reg);
- __ movd(xmm_scratch, Operand(result_reg));
-
- // Shift the input right and extract low 32 bits.
- __ psrlq(input_reg, xmm_scratch);
- __ movd(Operand(result_reg), input_reg);
-
- // Use the prepared mask in temp_reg to negate the result if necessary.
- __ xor_(result_reg, Operand(temp_reg));
- __ sub(result_reg, Operand(temp_reg));
- __ bind(&done);
- }
- } else {
- Label done;
- __ cvttsd2si(result_reg, Operand(input_reg));
- __ cvtsi2sd(xmm0, Operand(result_reg));
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ test(result_reg, Operand(result_reg));
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ and_(result_reg, 1);
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ bind(&done);
- }
-}
-
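The manual fallback in DoDoubleToI decodes the double by hand: extract the
exponent, rebuild the mantissa with its implicit leading bit (borrowing the
minus-zero constant, whose only set bit is bit 63), shift, then apply the
sign mask. A plain-C++ rendering, assuming the exponent check above already
passed (the code deoptimizes for unbiased exponents greater than 63):

#include <cstdint>
#include <cstring>

int32_t TruncateToInt32(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  int biased = static_cast<int>((bits >> 52) & 0x7FF);
  if (biased < 1023) return 0;  // |d| < 1 (including zeros) truncates to 0.
  // Shift the mantissa up over the 11 exponent bits and force the implicit
  // leading bit into bit 63 (what psllq + por with minus_zero do above).
  uint64_t mantissa = (bits << 11) | (1ull << 63);
  uint32_t magnitude = static_cast<uint32_t>(mantissa >> (1086 - biased));
  if (bits >> 63) magnitude = 0u - magnitude;  // The xor/sub negation mask.
  return static_cast<int32_t>(magnitude);
}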
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- __ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- __ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(first));
-
- // If there is only one type in the interval, check for equality.
- if (first == last) {
- DeoptimizeIf(not_equal, instr->environment());
- } else {
- DeoptimizeIf(below, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(last));
- DeoptimizeIf(above, instr->environment());
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (IsPowerOf2(mask)) {
- ASSERT(tag == 0 || IsPowerOf2(tag));
- __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
- } else {
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ and_(temp, mask);
- __ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr->environment());
- }
- }
-}
-
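The IsPowerOf2(mask) fast path above collapses a masked equality into one
test instruction: with a single-bit mask and a tag that is either 0 or that
same bit, (type & mask) == tag reduces to a zero or non-zero check.

#include <cstdint>

// Equivalent predicate, spelled out; returns true when the check passes.
bool SingleBitTypeCheck(uint8_t type, uint8_t mask, uint8_t tag) {
  bool bit_set = (type & mask) != 0;
  return (tag == 0) ? !bit_set : bit_set;  // Deopt on the opposite outcome.
}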
-
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (instr->hydrogen()->target_in_new_space()) {
- Register reg = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
- __ cmp(reg, Operand::Cell(cell));
- } else {
- Operand operand = ToOperand(instr->value());
- __ cmp(operand, target);
- }
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapCommon(Register reg,
- Handle<Map> map,
- CompareMapMode mode,
- LInstruction* instr) {
- Label success;
- __ CompareMap(reg, map, &success, mode);
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&success);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- Register reg = ToRegister(input);
-
- Label success;
- SmallMapList* map_set = instr->hydrogen()->map_set();
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
- __ j(equal, &success);
- }
- Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- ASSERT(instr->unclamped()->Equals(instr->result()));
- Register value_reg = ToRegister(instr->result());
- __ ClampUint8(value_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- CpuFeatures::Scope scope(SSE2);
-
- ASSERT(instr->unclamped()->Equals(instr->result()));
- Register input_reg = ToRegister(instr->unclamped());
- Label is_smi, done, heap_number;
-
- __ JumpIfSmi(input_reg, &is_smi);
-
- // Check for heap number
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
- __ mov(input_reg, 0);
- __ jmp(&done, Label::kNear);
-
- // Heap number
- __ bind(&heap_number);
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
- __ jmp(&done, Label::kNear);
-
- // smi
- __ bind(&is_smi);
- __ SmiUntag(input_reg);
- __ ClampUint8(input_reg);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
- Register reg = ToRegister(instr->temp());
-
- ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
- ZoneList<Handle<Map> >* maps = instr->maps();
-
- ASSERT(prototypes->length() == maps->length());
-
- // TODO(ulan): Move this check to hydrogen and split HCheckPrototypeMaps
- // into two instructions: one that checks the prototypes and another that
- // loads the holder (HConstant). Find a way to do it without breaking
- // parallel recompilation.
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- __ LoadHeapObject(reg, prototypes->at(prototypes->length() - 1));
- } else {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(reg, prototypes->at(i));
- DoCheckMapCommon(reg, maps->at(i), ALLOW_ELEMENT_TRANSITION_MAPS, instr);
- }
- }
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- // Allocate memory for the object. The initial map might change when
- // the constructor's prototype changes, but instance size and property
- // counts remain unchanged (if slack tracking finished).
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- no_reg,
- scratch,
- deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(scratch, constructor);
- __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(map);
- __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
- instance_size >> kPointerSizeLog2);
- __ Assert(equal, "Unexpected instance size");
- __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
- initial_map->pre_allocated_property_fields());
- __ Assert(equal, "Unexpected pre-allocated property fields count");
- __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
- initial_map->unused_property_fields());
- __ Assert(equal, "Unexpected unused property fields count");
- __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
- initial_map->inobject_properties());
- __ Assert(equal, "Unexpected in-object property fields count");
- }
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ mov(FieldOperand(result, JSObject::kMapOffset), map);
- __ mov(scratch, factory()->empty_fixed_array());
- __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
- __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
- if (initial_map->inobject_properties() != 0) {
- __ mov(scratch, factory()->undefined_value());
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ mov(FieldOperand(result, property_offset), scratch);
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, Immediate(0));
-
- PushSafepointRegistersScope scope(this);
- __ push(Immediate(Smi::FromInt(instance_size)));
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- // Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- Register size = ToRegister(instr->size());
- __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
- }
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
- Register result = ToRegister(instr->result());
-
- __ SmiTag(size);
- PushSafepointRegistersScope scope(this);
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- if (!size.is(result)) {
- __ StoreToSafepointRegisterSlot(result, size);
- }
- __ push(size);
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
- __ StoreToSafepointRegisterSlot(result, eax);
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
- AllocationSiteMode allocation_site_mode =
- instr->hydrogen()->allocation_site_mode();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // Load the map's "bit field 2". We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(ebx, Map::kElementsKindMask);
- __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Set up the parameters to the stub/runtime call.
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ push(Immediate(isolate()->factory()->empty_fixed_array()));
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode) {
- ASSERT(!source.is(ecx));
- ASSERT(!result.is(ecx));
-
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- object->map()->CanTrackAllocationSite();
-
- if (FLAG_debug_code) {
- __ LoadHeapObject(ecx, object);
- __ cmp(source, ecx);
- __ Assert(equal, "Unexpected object literal boilerplate");
- __ mov(ecx, FieldOperand(source, HeapObject::kMapOffset));
- __ cmp(ecx, Handle<Map>(object->map()));
- __ Assert(equal, "Unexpected boilerplate map");
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- __ and_(ecx, Map::kElementsKindMask);
- __ cmp(ecx, object->GetElementsKind() << Map::kElementsKindShift);
- __ Assert(equal, "Unexpected boilerplate elements kind");
- }
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_size = has_elements ? elements->Size() : 0;
- int elements_offset = *offset + object_size;
- if (create_allocation_site_info) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- }
-
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ lea(ecx, Operand(result, elements_offset));
- } else {
- __ mov(ecx, FieldOperand(source, i));
- }
- __ mov(FieldOperand(result, object_offset + i), ecx);
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
- isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(ecx, Operand(result, *offset));
- __ mov(FieldOperand(result, total_offset), ecx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
- __ mov(FieldOperand(result, total_offset), ecx);
- } else {
- __ mov(FieldOperand(result, total_offset), Immediate(value));
- }
- }
-
- // Build Allocation Site Info if desired
- if (create_allocation_site_info) {
- __ mov(FieldOperand(result, object_size),
- Immediate(Handle<Map>(isolate()->heap()->
- allocation_site_info_map())));
- __ mov(FieldOperand(result, object_size + kPointerSize), source);
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ mov(ecx, FieldOperand(source, i));
- __ mov(FieldOperand(result, elements_offset + i), ecx);
- }
-
- // Copy elements backing store content.
- int elements_length = elements->length();
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
- int32_t value_high = static_cast<int32_t>(value >> 32);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(FieldOperand(result, total_offset), Immediate(value_low));
- __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i), isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(ecx, Operand(result, *offset));
- __ mov(FieldOperand(result, total_offset), ecx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
- __ mov(FieldOperand(result, total_offset), ecx);
- } else {
- __ mov(FieldOperand(result, total_offset), Immediate(value));
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
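EmitDeepCopy packs everything into one allocation: each object's header and
in-object properties come first, then an optional allocation-site info cell
(two words are written for it above), then the elements backing store, with
*offset advanced past the lot. A sketch of the bookkeeping; the site-info
size here is an assumption for illustration:

struct DeepCopyLayout {
  int object_offset;    // Where this object's header lands.
  int elements_offset;  // Where its elements backing store lands.
  int end_offset;       // First free byte after object and elements.
};

DeepCopyLayout PlanDeepCopy(int offset, int object_size, int elements_size,
                            bool track_allocation_site) {
  const int kSiteInfoSize = 2 * static_cast<int>(sizeof(void*));  // Assumed.
  DeepCopyLayout layout;
  layout.object_offset = offset;
  layout.elements_offset = offset + object_size +
                           (track_allocation_site ? kSiteInfoSize : 0);
  layout.end_offset = layout.elements_offset + elements_size;
  return layout;
}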
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the literal boilerplate ElementsKind is of a type different than
- // the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
- // Load the map's "bit field 2". We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(ecx, Map::kElementsKindMask);
- __ cmp(ecx, boilerplate_elements_kind << Map::kElementsKindShift);
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset,
- instr->hydrogen()->allocation_site_mode());
- ASSERT_EQ(size, offset);
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= instr->hydrogen()->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
-
- // Set up the parameters to the stub/runtime call and pick the right
- // runtime function or stub to call.
- int properties_count = constant_properties->length() / 2;
- if (instr->hydrogen()->depth() > 1) {
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- } else {
- __ LoadHeapObject(eax, literals);
- __ mov(ebx, Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(ecx, Immediate(constant_properties));
- __ mov(edx, Immediate(Smi::FromInt(flags)));
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->value()).is(eax));
- __ push(eax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- Label materialized;
- // Registers will be used as follows:
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
- // esi = context.
- int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(ecx, instr->hydrogen()->literals());
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(instr->hydrogen()->pattern()));
- __ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
-}
-
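The copy loop above moves two pointer-sized words per iteration and mops up
an odd trailing word afterwards. The same structure in plain C++:

#include <cstdint>

// Word copy unrolled once, mirroring the loop above.
void CopyWordsUnrolled(uintptr_t* dst, const uintptr_t* src, int byte_size) {
  int words = byte_size / static_cast<int>(sizeof(uintptr_t));
  int i = 0;
  for (; i + 1 < words; i += 2) {  // Two loads, then two stores.
    uintptr_t a = src[i];
    uintptr_t b = src[i + 1];
    dst[i] = a;
    dst[i + 1] = b;
  }
  if (i < words) dst[i] = src[i];  // Odd trailing word, if any.
}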
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
- __ push(Immediate(shared_info));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- __ push(esi);
- __ push(Immediate(shared_info));
- __ push(Immediate(pretenure
- ? factory()->true_value()
- : factory()->false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- LOperand* input = instr->value();
- EmitPushTaggedOperand(input);
- CallRuntime(Runtime::kTypeof, 1, instr);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition final_branch_condition =
- EmitTypeofIs(true_label, false_label, input, instr->type_literal());
- if (final_branch_condition != no_condition) {
- EmitBranch(true_block, false_block, final_branch_condition);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
- Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_string())) {
- __ JumpIfSmi(input, true_label);
- __ cmp(FieldOperand(input, HeapObject::kMapOffset),
- factory()->heap_number_map());
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->string_string())) {
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label);
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- final_branch_condition = zero;
-
- } else if (type_name->Equals(heap()->boolean_string())) {
- __ cmp(input, factory()->true_value());
- __ j(equal, true_label);
- __ cmp(input, factory()->false_value());
- final_branch_condition = equal;
-
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
- __ cmp(input, factory()->null_value());
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->undefined_string())) {
- __ cmp(input, factory()->undefined_value());
- __ j(equal, true_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- final_branch_condition = not_zero;
-
- } else if (type_name->Equals(heap()->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
- __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->object_string())) {
- __ JumpIfSmi(input, false_label);
- if (!FLAG_harmony_typeof) {
- __ cmp(input, factory()->null_value());
- __ j(equal, true_label);
- }
- if (FLAG_harmony_symbols) {
- __ CmpObjectType(input, SYMBOL_TYPE, input);
- __ j(equal, true_label);
- __ CmpInstanceType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- } else {
- __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
- }
- __ j(below, false_label);
- __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, false_label);
- // Check for undetectable objects => false.
- __ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- final_branch_condition = zero;
-
- } else {
- __ jmp(false_label);
- }
- return final_branch_condition;
-}
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitIsConstructCall(temp);
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker, Label::kNear);
- __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
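EmitIsConstructCall walks to the caller's frame, skips over an
arguments-adaptor frame if one is present, and then compares the frame
marker. Structurally (the field names below are illustrative, not the real
frame layout):

#include <cstdint>

struct Frame {
  Frame* caller;
  intptr_t context;  // Holds a sentinel smi in arguments-adaptor frames.
  intptr_t marker;   // Identifies construct frames.
};

bool IsConstructCall(Frame* frame, intptr_t adaptor_sentinel,
                     intptr_t construct_marker) {
  Frame* caller = frame->caller;
  if (caller->context == adaptor_sentinel) caller = caller->caller;
  return caller->marker == construct_marker;
}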
-
-void LCodeGen::EnsureSpaceForLazyDeopt() {
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- __ Nop(padding_size);
- }
- }
- last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
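EnsureSpaceForLazyDeopt pads with nops so the patch applied at one lazy
bailout can never overlap the previous patch site. The padding arithmetic
in isolation:

// Bytes of padding needed before the next patchable lazy-deopt site.
int LazyDeoptPadding(int last_patch_pc, int current_pc, int patch_size) {
  return (current_pc < last_patch_pc + patch_size)
             ? last_patch_pc + patch_size - current_pc
             : 0;
}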
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(no_condition, instr->environment());
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- __ push(ToOperand(obj));
- EmitPushTaggedOperand(key);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin).
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ push(Immediate(Smi::FromInt(strict_mode_flag())));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(
- instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStackCheck* instr_;
- };
-
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &done, Label::kNear);
-
- ASSERT(instr->context()->IsRegister());
- ASSERT(ToRegister(instr->context()).is(esi));
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- EnsureSpaceForLazyDeopt();
- __ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- } else {
- ASSERT(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting the call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoIn(LIn* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(key);
- EmitPushTaggedOperand(obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- __ cmp(eax, isolate()->factory()->undefined_value());
- DeoptimizeIf(equal, instr->environment());
-
- __ cmp(eax, isolate()->factory()->null_value());
- DeoptimizeIf(equal, instr->environment());
-
- __ test(eax, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
-
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- DeoptimizeIf(below_equal, instr->environment());
-
- Label use_cache, call_runtime;
- __ CheckEnumCache(&call_runtime);
-
- __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
- __ jmp(&use_cache, Label::kNear);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(eax);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->meta_map());
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
- __ cmp(result, Immediate(Smi::FromInt(0)));
- __ j(not_equal, &load_cache);
- __ mov(result, isolate()->factory()->empty_fixed_array());
- __ jmp(&done);
-
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ mov(result,
- FieldOperand(result, DescriptorArray::kEnumCacheOffset));
- __ mov(result,
- FieldOperand(result, FixedArray::SizeFor(instr->idx())));
- __ bind(&done);
- __ test(result, result);
- DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- __ cmp(ToRegister(instr->map()),
- FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
-
- Label out_of_object, done;
- __ cmp(index, Immediate(0));
- __ j(less, &out_of_object);
- __ mov(object, FieldOperand(object,
- index,
- times_half_pointer_size,
- JSObject::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&out_of_object);
- __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
- __ neg(index);
- // Index is now equal to the out-of-object property index plus 1.
- __ mov(object, FieldOperand(object,
- index,
- times_half_pointer_size,
- FixedArray::kHeaderSize - kPointerSize));
- __ bind(&done);
-}
-
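DoLoadFieldByIndex decodes a signed field index: non-negative values select
in-object fields, while negative values select slots in the out-of-object
properties array, offset by one (hence the negation and the header-size
minus one word above). A sketch with hypothetical accessors:

// index >= 0: in-object field; index < 0: properties slot (-index - 1).
Object* LoadFieldByIndex(JSObject* object, int index) {
  if (index >= 0) return object->InObjectFieldAt(index);
  return object->properties()->get(-index - 1);
}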
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
deleted file mode 100644
index ab6779a..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
+++ /dev/null
@@ -1,475 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
-#define V8_IA32_LITHIUM_CODEGEN_IA32_H_
-
-#include "ia32/lithium-ia32.h"
-
-#include "checks.h"
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "ia32/lithium-gap-resolver-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class LGapNode;
-class SafepointGenerator;
-
-class LCodeGen BASE_EMBEDDED {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
- deoptimizations_(4, info->zone()),
- jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
- inlined_function_count_(0),
- scope_(info->scope()),
- status_(UNUSED),
- translations_(info->zone()),
- deferred_(8, info->zone()),
- dynamic_frame_alignment_(false),
- support_aligned_spilled_doubles_(false),
- osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
- bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- // Support for converting LOperands to assembler types.
- Operand ToOperand(LOperand* op) const;
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
- bool IsX87TopOfStack(LOperand* op) const;
-
- bool IsInteger32(LConstantOperand* op) const;
- Immediate ToInteger32Immediate(LOperand* op) const {
- return Immediate(ToInteger32(LConstantOperand::cast(op)));
- }
-
- Handle<Object> ToHandle(LConstantOperand* op) const;
-
- // A utility for instructions that return floating point values on X87.
- void HandleX87FPReturnValue(LInstruction* instr);
-
- // The operand denoting the second word (the one with a higher address) of
- // a double stack slot.
- Operand HighOperand(LOperand* op);
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
-
- enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
-
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
-
- void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LInstruction* instr);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
-
- void EnsureRelocSpaceForDeoptimization();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
-
- LPlatformChunk* chunk() const { return chunk_; }
- Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
-
- int GetNextEmittedBlock(int block);
-
- void EmitClassOfTest(Label* if_true,
- Label* if_false,
- Handle<String> class_name,
- Register input,
- Register temporary,
- Register temporary2);
-
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return info()->num_parameters(); }
-
- void Abort(const char* reason);
- void Comment(const char* format, ...);
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- // Code generation passes. Returns true if code generation should
- // continue.
- bool GeneratePrologue();
- bool GenerateBody();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
- };
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
-
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void CallRuntime(const Runtime::Function* fun,
- int argc,
- LInstruction* instr);
-
- void CallRuntime(Runtime::FunctionId id,
- int argc,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, argc, instr);
- }
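-
- // Usage sketch (editor's note, not in the original header): the
- // FunctionId overload above saves call sites the table lookup, e.g.
- //   CallRuntime(Runtime::kStackGuard, 0, instr);
- // assuming Runtime::kStackGuard, the id used by the stack-check path.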
-
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context);
-
- void LoadContextFromDeferred(LOperand* context);
-
- enum EDIState {
- EDI_UNINITIALIZED,
- EDI_CONTAINS_TARGET
- };
-
- // Generate a direct call to a known function. Expects the function
- // to be in edi.
- void CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- EDIState edi_state);
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode);
-
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
-
- void AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
- void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- Register ToRegister(int index) const;
- XMMRegister ToDoubleRegister(int index) const;
- int ToInteger32(LConstantOperand* op) const;
-
- double ToDouble(LConstantOperand* op) const;
- Operand BuildFastArrayOperand(LOperand* elements_pointer,
- LOperand* key,
- Representation key_representation,
- ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index = 0);
-
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
-
- // Support for recording safepoint and position information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordPosition(int position);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitNumberUntagD(
- Register input,
- Register temp,
- XMMRegister result,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false labels should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
-
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false labels should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false labels should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string);
-
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp);
-
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
-
- void EnsureSpaceForLazyDeopt();
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- // Emits code for pushing either a tagged constant, a (non-double)
- // register, or a stack slot operand.
- void EmitPushTaggedOperand(LOperand* operand);
-
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
- : label(),
- address(entry),
- needs_frame(frame),
- is_lazy_deopt(is_lazy) { }
- Label label;
- Address address;
- bool needs_frame;
- bool is_lazy_deopt;
- };
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
- ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
- int inlined_function_count_;
- Scope* const scope_;
- Status status_;
- TranslationBuffer translations_;
- ZoneList<LDeferredCode*> deferred_;
- bool dynamic_frame_alignment_;
- bool support_aligned_spilled_doubles_;
- int osr_pc_offset_;
- int last_lazy_deopt_pc_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiles a set of parallel moves into a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->masm_->PushSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
- ASSERT(codegen_->info()->is_calling());
- }
-
- ~PushSafepointRegistersScope() {
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- codegen_->masm_->PopSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
-
- private:
- LCodeGen* codegen_;
- };
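-
- // Usage sketch (editor's note, not in the original file): call sites
- // that must make all registers visible to the GC wrap the call in a
- // scope:
- //   {
- //     PushSafepointRegistersScope scope(this);
- //     // ... emit the runtime call; a safepoint with registers is
- //     // recorded while the scope is active ...
- //   }  // destructor pops the registers and restores Safepoint::kSimple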
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() { }
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
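-
-// Editor's sketch of a typical subclass (illustrative, mirroring the
-// deferred helpers declared in LCodeGen above; such classes are declared
-// locally inside the emitting Do* member function in the .cc file, which
-// is why SIGNED_INT32 resolves unqualified):
-//
-//   class DeferredNumberTagI: public LDeferredCode {
-//    public:
-//     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
-//         : LDeferredCode(codegen), instr_(instr) { }
-//     virtual void Generate() {
-//       codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
-//     }
-//     virtual LInstruction* instr() { return instr_; }
-//    private:
-//     LNumberTagI* instr_;
-//   };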
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_LITHIUM_CODEGEN_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc
deleted file mode 100644
index 6fee7fe..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ /dev/null
@@ -1,494 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "ia32/lithium-gap-resolver-ia32.h"
-#include "ia32/lithium-codegen-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner),
- moves_(32, owner->zone()),
- source_uses_(),
- destination_uses_(),
- spilled_register_(-1) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(HasBeenReset());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip moves with constant sources so that they are performed last.
- // They don't block other moves, and deferring the ones with register
- // destinations keeps those registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- PerformMove(i);
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- ASSERT(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- Finish();
- ASSERT(HasBeenReset());
-}
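-
-// Editor's note (a sketch, not in the original file): for the parallel move
-//   { eax <- ebx, ebx <- eax, ecx <- #42 }
-// the first loop above resolves the eax/ebx cycle with a swap, and the
-// second loop emits the constant load into ecx, whose register stayed
-// free for the cycle-breaking pass.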
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) AddMove(move);
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph. We use operand swaps to resolve cycles,
- // which means that a call to PerformMove could change any source operand
- // in the move graph.
-
- ASSERT(!moves_[index].IsPending());
- ASSERT(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved on the side.
- ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- // Though PerformMove can change any source operand in the move graph,
- // this call cannot create a blocking move via a swap (this loop does
- // not miss any). Assume there is a non-blocking move with source A
- // and this move is blocked on source B and there is a swap of A and
- // B. Then A and B must be involved in the same cycle (or they would
- // not be swapped). Since this move's destination is B and there is
- // only a single incoming edge to an operand, this move must also be
- // involved in the same cycle. In that case, the blocking move will
- // be created but will be "pending" when we return from PerformMove.
- PerformMove(i);
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // This move's source may have changed due to swaps to resolve cycles and
- // so it may now be the last move in the cycle. If so remove it.
- if (moves_[index].source()->Equals(destination)) {
- RemoveMove(index);
- return;
- }
-
- // The move may be blocked on at most one pending move, in which case
- // we have a cycle. Search for such a blocking move and perform a swap to
- // resolve it.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
- EmitSwap(index);
- return;
- }
- }
-
- // This move is not blocked.
- EmitMove(index);
-}
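-
-// Editor's worked example (not in the original file): with moves
-//   0: r1 <- r2 and 1: r2 <- r1,
-// PerformMove(0) clears move 0's destination to mark it pending and
-// recurses into move 1, which it blocks. Move 1 finds no non-pending
-// blocker, sees pending move 0 blocking its destination, and emits a
-// swap; the swap removes move 1 and rewrites move 0's source to r1.
-// Back in PerformMove(0), source now equals destination, so move 0 is
-// removed as the last move of the cycle.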
-
-
-void LGapResolver::AddMove(LMoveOperands move) {
- LOperand* source = move.source();
- if (source->IsRegister()) ++source_uses_[source->index()];
-
- LOperand* destination = move.destination();
- if (destination->IsRegister()) ++destination_uses_[destination->index()];
-
- moves_.Add(move, cgen_->zone());
-}
-
-
-void LGapResolver::RemoveMove(int index) {
- LOperand* source = moves_[index].source();
- if (source->IsRegister()) {
- --source_uses_[source->index()];
- ASSERT(source_uses_[source->index()] >= 0);
- }
-
- LOperand* destination = moves_[index].destination();
- if (destination->IsRegister()) {
- --destination_uses_[destination->index()];
- ASSERT(destination_uses_[destination->index()] >= 0);
- }
-
- moves_[index].Eliminate();
-}
-
-
-int LGapResolver::CountSourceUses(LOperand* operand) {
- int count = 0;
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
- ++count;
- }
- }
- return count;
-}
-
-
-Register LGapResolver::GetFreeRegisterNot(Register reg) {
- int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
- return Register::FromAllocationIndex(i);
- }
- }
- return no_reg;
-}
-
-
-bool LGapResolver::HasBeenReset() {
- if (!moves_.is_empty()) return false;
- if (spilled_register_ >= 0) return false;
-
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] != 0) return false;
- if (destination_uses_[i] != 0) return false;
- }
- return true;
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::Finish() {
- if (spilled_register_ >= 0) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
- spilled_register_ = -1;
- }
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::EnsureRestored(LOperand* operand) {
- if (operand->IsRegister() && operand->index() == spilled_register_) {
- __ pop(Register::FromAllocationIndex(spilled_register_));
- spilled_register_ = -1;
- }
-}
-
-
-Register LGapResolver::EnsureTempRegister() {
- // 1. We may have already spilled to create a temp register.
- if (spilled_register_ >= 0) {
- return Register::FromAllocationIndex(spilled_register_);
- }
-
- // 2. We may have a free register that we can use without spilling.
- Register free = GetFreeRegisterNot(no_reg);
- if (!free.is(no_reg)) return free;
-
- // 3. Prefer to spill a register that is not used in any remaining move
- // because it will not need to be restored until the end.
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
- Register scratch = Register::FromAllocationIndex(i);
- __ push(scratch);
- spilled_register_ = i;
- return scratch;
- }
- }
-
- // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
- Register scratch = Register::FromAllocationIndex(0);
- __ push(scratch);
- spilled_register_ = 0;
- return scratch;
-}
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
- EnsureRestored(source);
- EnsureRestored(destination);
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- ASSERT(destination->IsRegister() || destination->IsStackSlot());
- Register src = cgen_->ToRegister(source);
- Operand dst = cgen_->ToOperand(destination);
- __ mov(dst, src);
-
- } else if (source->IsStackSlot()) {
- ASSERT(destination->IsRegister() || destination->IsStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ mov(dst, src);
- } else {
- // Spill on demand to use a temporary register for memory-to-memory
- // moves.
- Register tmp = EnsureTempRegister();
- Operand dst = cgen_->ToOperand(destination);
- __ mov(tmp, src);
- __ mov(dst, tmp);
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
- } else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
- }
- } else {
- ASSERT(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- if (cgen_->IsInteger32(constant_source)) {
- __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
- } else {
- Register tmp = EnsureTempRegister();
- __ LoadObject(tmp, cgen_->ToHandle(constant_source));
- __ mov(dst, tmp);
- }
- }
-
- } else if (source->IsDoubleRegister()) {
- CpuFeatures::Scope scope(SSE2);
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movaps(dst, src);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
- }
- } else if (source->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(SSE2);
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movdbl(dst, src);
- } else {
- // We rely on having xmm0 available as a fixed scratch register.
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(xmm0, src);
- __ movdbl(dst, xmm0);
- }
- } else {
- UNREACHABLE();
- }
-
- RemoveMove(index);
-}
-
-
-void LGapResolver::EmitSwap(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
- EnsureRestored(source);
- EnsureRestored(destination);
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Register-register.
- Register src = cgen_->ToRegister(source);
- Register dst = cgen_->ToRegister(destination);
- __ xchg(dst, src);
-
- } else if ((source->IsRegister() && destination->IsStackSlot()) ||
- (source->IsStackSlot() && destination->IsRegister())) {
- // Register-memory. Use a free register as a temp if possible. Do not
- // spill on demand because the simple spill implementation cannot avoid
- // spilling src at this point.
- Register tmp = GetFreeRegisterNot(no_reg);
- Register reg =
- cgen_->ToRegister(source->IsRegister() ? source : destination);
- Operand mem =
- cgen_->ToOperand(source->IsRegister() ? destination : source);
- if (tmp.is(no_reg)) {
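- // Editor's comment: three XORs swap |reg| and |mem| in place without
- // a scratch register; after reg ^= mem, mem ^= reg, reg ^= mem, each
- // operand holds the other's original value.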
- __ xor_(reg, mem);
- __ xor_(mem, reg);
- __ xor_(reg, mem);
- } else {
- __ mov(tmp, mem);
- __ mov(mem, reg);
- __ mov(reg, tmp);
- }
-
- } else if (source->IsStackSlot() && destination->IsStackSlot()) {
- // Memory-memory. Spill on demand to use a temporary. If there is a
- // free register after that, use it as a second temporary.
- Register tmp0 = EnsureTempRegister();
- Register tmp1 = GetFreeRegisterNot(tmp0);
- Operand src = cgen_->ToOperand(source);
- Operand dst = cgen_->ToOperand(destination);
- if (tmp1.is(no_reg)) {
- // Only one temp register available to us.
- __ mov(tmp0, dst);
- __ xor_(tmp0, src);
- __ xor_(src, tmp0);
- __ xor_(tmp0, src);
- __ mov(dst, tmp0);
- } else {
- __ mov(tmp0, dst);
- __ mov(tmp1, src);
- __ mov(dst, tmp1);
- __ mov(src, tmp0);
- }
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
- CpuFeatures::Scope scope(SSE2);
- // XMM register-register swap. We rely on having xmm0
- // available as a fixed scratch register.
- XMMRegister src = cgen_->ToDoubleRegister(source);
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movaps(xmm0, src);
- __ movaps(src, dst);
- __ movaps(dst, xmm0);
-
- } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
- CpuFeatures::Scope scope(SSE2);
- // XMM register-memory swap. We rely on having xmm0
- // available as a fixed scratch register.
- ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
- XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
- ? source
- : destination);
- Operand other =
- cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
- __ movdbl(xmm0, other);
- __ movdbl(other, reg);
- __ movdbl(reg, Operand(xmm0));
-
- } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
- CpuFeatures::Scope scope(SSE2);
- // Double-width memory-to-memory. Spill on demand to use a general
- // purpose temporary register and also rely on having xmm0 available as
- // a fixed scratch register.
- Register tmp = EnsureTempRegister();
- Operand src0 = cgen_->ToOperand(source);
- Operand src1 = cgen_->HighOperand(source);
- Operand dst0 = cgen_->ToOperand(destination);
- Operand dst1 = cgen_->HighOperand(destination);
- __ movdbl(xmm0, dst0); // Save destination in xmm0.
- __ mov(tmp, src0); // Then use tmp to copy source to destination.
- __ mov(dst0, tmp);
- __ mov(tmp, src1);
- __ mov(dst1, tmp);
- __ movdbl(src0, xmm0);
-
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
-
- // The swap of source and destination has executed a move from source to
- // destination.
- RemoveMove(index);
-
- // Any unperformed (including pending) move with a source of either
- // this move's source or destination needs to have its source
- // changed to reflect the state of affairs after the swap.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(source)) {
- moves_[i].set_source(destination);
- } else if (other_move.Blocks(destination)) {
- moves_[i].set_source(source);
- }
- }
-
- // In addition to swapping the actual uses as sources, we need to update
- // the use counts.
- if (source->IsRegister() && destination->IsRegister()) {
- int temp = source_uses_[source->index()];
- source_uses_[source->index()] = source_uses_[destination->index()];
- source_uses_[destination->index()] = temp;
- } else if (source->IsRegister()) {
- // We don't have use counts for non-register operands like destination.
- // Compute those counts now.
- source_uses_[source->index()] = CountSourceUses(source);
- } else if (destination->IsRegister()) {
- source_uses_[destination->index()] = CountSourceUses(destination);
- }
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h b/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h
deleted file mode 100644
index 3a58f58..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-#define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
-
-#include "v8.h"
-
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // Emit any code necessary at the end of a gap move.
- void Finish();
-
- // Add or delete a move from the move graph without emitting any code.
- // Used to build up the graph and remove trivial moves.
- void AddMove(LMoveOperands move);
- void RemoveMove(int index);
-
- // Return the number of uses of the operand as a source in moves that
- // have not yet been performed. Used to rebuild use counts.
- int CountSourceUses(LOperand* operand);
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Execute a move by emitting a swap of two operands. The move from
- // source to destination is removed from the move graph.
- void EmitSwap(int index);
-
- // Ensure that the given operand is not spilled.
- void EnsureRestored(LOperand* operand);
-
- // Return a register that can be used as a temp register, spilling
- // something if necessary.
- Register EnsureTempRegister();
-
- // Return a known free register different from the given one (which may
- // be no_reg, in which case any free register is returned), or no_reg
- // if there is no such register.
- Register GetFreeRegisterNot(Register reg);
-
- // Verify that the state is the initial one, ready to resolve a single
- // parallel move.
- bool HasBeenReset();
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-
- // Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kMaxNumAllocatableRegisters];
- int destination_uses_[Register::kMaxNumAllocatableRegisters];
-
- // If we had to spill on demand, the currently spilled register's
- // allocation index.
- int spilled_register_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/lithium-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-ia32.cc
deleted file mode 100644
index 910219d..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-ia32.cc
+++ /dev/null
@@ -1,2604 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "lithium-allocator-inl.h"
-#include "ia32/lithium-ia32.h"
-#include "ia32/lithium-codegen-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Input operands must use a fixed register, a use-at-start policy, or
- // a non-register policy.
- ASSERT(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "sal-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d",
- *hydrogen()->class_name(),
- true_block_id(),
- false_block_id());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
- true_block_id(), false_block_id());
-}
-
-
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
-}
-
-
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
-void LMathPowHalf::PrintDataTo(StringStream* stream) {
- stream->Add("/pow_half ");
- value()->PrintTo(stream);
-}
-
-
-void LMathRound::PrintDataTo(StringStream* stream) {
- stream->Add("/round ");
- value()->PrintTo(stream);
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[ecx] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ASSERT(hydrogen()->property_cell()->value()->IsSmi());
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(hydrogen()->property_cell()->value())->value());
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
-
- stream->Add(" length ");
- length()->PrintTo(stream);
-
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
- // Skip a slot if necessary to keep double-width slots aligned.
- if (is_double) {
- spill_slot_count_++;
- spill_slot_count_ |= 1;
- num_double_slots_++;
- }
- return spill_slot_count_++;
-}
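-
-// Editor's arithmetic sketch (not in the original file): for a double with
-// spill_slot_count_ == 4 the increment gives 5, already odd, so index 5 is
-// returned and the count advances to 6. With spill_slot_count_ == 5 the
-// increment gives 6 and the |= 1 bumps it to 7, leaving slot 6 as padding;
-// doubles therefore always start at an odd index, which keeps the two-slot
-// pair aligned.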
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
- } else {
- stream->Add("] <- ");
- }
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
-
- // Reserve the first spill slot for the state of dynamic alignment.
- if (info()->IsOptimizing()) {
- int alignment_state_index = chunk_->GetNextSpillIndex(false);
- ASSERT_EQ(alignment_state_index, 0);
- USE(alignment_state_index);
- }
-
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-void LChunkBuilder::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- X87TopOfStackRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
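-
-// Editor's summary of the Use* ladder above (not in the original file):
-// Use/UseAtStart place no constraint on the operand's location;
-// UseRegister/UseRegisterAtStart require a register; UseTempRegister
-// requires a writable register; the *AtStart variants declare the value
-// dead after the start of the instruction, so the allocator may reuse its
-// register for the result; the *OrConstant variants substitute a constant
-// operand when the HValue is a compile-time constant; UseAny additionally
-// accepts a stack slot.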
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr,
- int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineX87TOS(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, ToUnallocated(x87tos));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_.IsNone());
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
- }
-
- // If the instruction does not have side effects, lazy deoptimization
- // after the call will try to deoptimize to the point before the call.
- // Thus we still need to attach an environment to this call even if
- // the call sequence cannot deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
- if (!allocator_->AllocationOk()) {
- Abort("Not enough virtual registers (temps).");
- }
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseFixed(right_value, ecx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
- }
- }
-
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
- HValue* left = instr->left();
- HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left_operand = UseFixed(left, edx);
- LOperand* right_operand = UseFixed(right, eax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, context, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- ASSERT(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
- // Only copy the environment if it is used again later.
- if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
- } else {
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
- // No need to copy the environment; it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
-
- if (instr != NULL) {
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- instr->set_hydrogen_value(current);
- chunk_->AddInstruction(instr, current_block_);
- }
- current_instruction_ = old_current;
-}
-
-
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
- LEnvironment* result =
- new(zone()) LEnvironment(hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
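- // Map each hydrogen value to an LOperand: arguments objects are
- // materialized lazily during deoptimization, pushed arguments become
- // argument slots, and everything else may live anywhere.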
- for (int i = 0; i < value_count; ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
- }
-
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- ASSERT(value->IsConstant());
- ASSERT(!value->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
-
- // Untagged integers or doubles, smis, and booleans require neither a
- // deoptimization environment nor a temp register.
- Representation rep = value->representation();
- HType type = value->type();
- if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
- return new(zone()) LBranch(UseRegister(value), NULL);
- }
-
- ToBooleanStub::Types expected = instr->expected_input_types();
- // We need a temporary register when we have to access the map *or* we have
- // no type info yet, in which case we handle all cases (including the ones
- // involving maps).
- bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
- LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpMapAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
- LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
- LOperand* context = UseFixed(instr->context(), esi);
- LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(
- UseFixed(instr->context(), esi),
- UseFixed(instr->left(), InstanceofStub::left()),
- FixedTemp(edi));
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegister(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LOperand* temp = TempRegister();
- LWrapReceiver* result =
- new(zone()) LWrapReceiver(receiver, function, temp);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), edi);
- LOperand* receiver = UseFixed(instr->receiver(), eax);
- LOperand* length = UseFixed(instr->length(), ebx);
- LOperand* elements = UseFixed(instr->elements(), ecx);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
- LOperand* argument = UseAny(instr->argument());
- return new(zone()) LPushArgument(argument);
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- if (instr->HasNoUses()) return NULL;
-
- if (info()->IsStub()) {
- return DefineFixed(new(zone()) LContext, esi);
- }
-
- return DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context,
- instr->qml_global()));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
-}
-
-
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* context = UseAny(instr->context()); // Not actually used.
- LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
- input);
- return DefineSameAsFirst(result);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* value = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
- return DefineAsRegister(result);
- } else if (op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
- input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
- if (op == kMathPowHalf) {
- LOperand* temp = TempRegister();
- LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
- return DefineSameAsFirst(result);
- } else if (op == kMathRound) {
- LOperand* temp = FixedTemp(xmm4);
- LMathRound* result = new(zone()) LMathRound(context, input, temp);
- return AssignEnvironment(DefineAsRegister(result));
- }
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
- input);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* key = UseFixed(instr->key(), ecx);
- argument_count_ -= instr->argument_count();
- LCallKeyed* result = new(zone()) LCallKeyed(context, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
- LCallNamed* result = new(zone()) LCallNamed(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
- LCallGlobal* result = new(zone()) LCallGlobal(context, instr->qml_global());
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- ASSERT(FLAG_optimize_constructed_arrays);
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* constructor = UseFixed(instr->constructor(), edi);
- argument_count_ -= instr->argument_count();
- LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* function = UseFixed(instr->function(), edi);
- argument_count_ -= instr->argument_count();
- LCallFunction* result = new(zone()) LCallFunction(context, function);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new(zone()) LBitI(left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(instr->op(), context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new(zone()) LBitNotI(input);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
- }
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
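- // ia32's idiv takes its dividend in edx:eax and writes the quotient to
- // eax, which is why the dividend and result are fixed to eax.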
- LOperand* temp = FixedTemp(edx);
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, eax));
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- if (!right->IsConstant()) {
- ASSERT(right->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into edx.
- LOperand* temp = FixedTemp(edx);
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(flooring_div, eax));
- }
-
- ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
- LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
- int32_t divisor_si = HConstant::cast(right)->Integer32Value();
- if (divisor_si == 0) {
- LOperand* dividend = UseRegister(instr->left());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
- } else if (IsPowerOf2(abs(divisor_si))) {
- // Use the dividend as a temp register if divisor < 0 && divisor != -1.
- LOperand* dividend = divisor_si < -1 ? UseTempRegister(instr->left()) :
- UseRegisterAtStart(instr->left());
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
- } else {
- // The general case needs edx:eax, plus a temp register.
- LOperand* dividend = UseFixed(instr->left(), eax);
- LOperand* temp = TempRegister();
- LInstruction* result = DefineFixed(
- new(zone()) LMathFloorOfDiv(dividend, divisor, temp), edx);
- return divisor_si < 0 ? AssignEnvironment(result) : result;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LInstruction* result;
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod =
- new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
- result = DefineSameAsFirst(mod);
- } else {
- // The temporary operand is necessary to ensure that right is
- // not allocated into edx.
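- // idiv writes the remainder to edx, which is why the result of LModI is
- // fixed to edx below.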
- LOperand* temp = FixedTemp(edx);
- LOperand* value = UseFixed(instr->left(), eax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new(zone()) LModI(value, divisor, temp);
- result = DefineFixed(mod, edx);
- }
-
- return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero))
- ? AssignEnvironment(result)
- : result;
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
- } else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
- // We need to use a fixed result register for the call.
- // TODO(fschneider): Allow any registers as input registers.
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- temp = TempRegister();
- }
- LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- AssignEnvironment(mul);
- }
- return DefineSameAsFirst(mul);
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::ADD, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
- } else {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
- return DefineSameAsFirst(minmax);
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
- // We need to use a fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), xmm1) :
- UseFixed(instr->right(), eax);
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
- LOperand* global_object = UseFixed(instr->global_object(), eax);
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LCmpT* result = new(zone()) LCmpT(context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
- } else {
- ASSERT(r.IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- LOperand* left;
- LOperand* right;
- if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
- left = UseRegisterOrConstantAtStart(instr->left());
- right = UseRegisterOrConstantAtStart(instr->right());
- } else {
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return new(zone()) LCmpIDAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- return new(zone()) LCmpConstantEqAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- // We only need a temp register for non-strict compare.
- LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsUndetectableAndBranch(
- UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
-
- LStringCompareAndBranch* result = new(zone())
- LStringCompareAndBranch(context, left, right);
-
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LHasInstanceTypeAndBranch(
- UseRegisterAtStart(instr->value()),
- TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
- TempRegister(),
- TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* date = UseFixed(instr->value(), eax);
- LDateField* result =
- new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
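- // A one-byte store uses a byte-sized mov, so the value must live in a
- // byte-addressable register; ecx qualifies, as the assert checks.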
- ASSERT(ecx.is_byte_register());
- LOperand* value = UseFixed(instr->value(), ecx);
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
- HInductionVariableAnnotation* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- return AssignEnvironment(new(zone()) LBoundsCheck(
- UseRegisterOrConstantAtStart(instr->index()),
- UseAtStart(instr->length())));
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* value = UseFixed(instr->value(), eax);
- return MarkAsCall(new(zone()) LThrow(context, value), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
- // Only mark conversions that might need to allocate as calling rather than
- // all changes. This way simple, non-allocating conversions do not force
- // building a stack frame.
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- // A temp register is only necessary for the minus-zero check.
- LOperand* temp = instr->deoptimize_on_minus_zero()
- ? TempRegister()
- : NULL;
- LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- if (instr->value()->type().IsSmi()) {
- return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
- } else {
- bool truncating = instr->CanTruncateToInt32();
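- // With SSE3 a truncating conversion can use fisttp and needs no XMM
- // scratch register; otherwise xmm1 is reserved for the conversion.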
- LOperand* xmm_temp =
- (truncating && CpuFeatures::IsSupported(SSE3))
- ? NULL
- : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = CpuFeatures::IsSupported(SSE2)
- ? UseRegisterAtStart(instr->value())
- : UseAtStart(instr->value());
- LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
-
- // Make sure that temp and result_temp are different registers.
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
- return AssignPointerMap(Define(result, result_temp));
- } else {
- ASSERT(to.IsInteger32());
- bool truncating = instr->CanTruncateToInt32();
- bool needs_temp = truncating && !CpuFeatures::IsSupported(SSE3);
- LOperand* value = needs_temp ?
- UseTempRegister(instr->value()) : UseRegister(instr->value());
- LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new(zone()) LSmiTag(value));
- } else if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- } else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- }
- } else {
- ASSERT(to.IsDouble());
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp));
- } else {
- return DefineAsRegister(
- new(zone()) LInteger32ToDouble(Use(instr->value())));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- return AssignEnvironment(Define(result, temp));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
- LOperand* value = UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- // If the target is in new space, we'll emit a global cell compare and so
- // want the value in a register. If the target gets promoted before we
- // emit code, we will still get the register but will do an immediate
- // compare instead of the cell compare. This is safe.
- LOperand* value = instr->target_in_new_space()
- ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LCheckMaps* result = new(zone()) LCheckMaps(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- if (input_rep.IsDouble()) {
- LOperand* reg = UseRegister(value);
- return DefineFixed(new(zone()) LClampDToUint8(reg), eax);
- } else if (input_rep.IsInteger32()) {
- LOperand* reg = UseFixed(value, eax);
- return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
- } else {
- ASSERT(input_rep.IsTagged());
- LOperand* reg = UseFixed(value, eax);
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve xmm1 explicitly.
- LOperand* temp = FixedTemp(xmm1);
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
- return AssignEnvironment(DefineFixed(result, eax));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- LOperand* context = info()->IsStub()
- ? UseFixed(instr->context(), esi)
- : NULL;
- return new(zone()) LReturn(UseFixed(instr->value(), eax), context);
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- double value = instr->DoubleValue();
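- // 0.0 can be generated with a single xorps, so the temp register is
- // only needed to materialize a nonzero bit pattern.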
- LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
- ? TempRegister()
- : NULL;
- return DefineAsRegister(new(zone()) LConstantD(temp));
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object = UseFixed(instr->global_object(), edx);
- LLoadGlobalGeneric* result =
- new(zone()) LLoadGlobalGeneric(context, global_object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LStoreGlobalCell* result =
- new(zone()) LStoreGlobalCell(UseRegister(instr->value()));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object = UseFixed(instr->global_object(), edx);
- LOperand* value = UseFixed(instr->value(), eax);
- LStoreGlobalGeneric* result =
- new(zone()) LStoreGlobalGeneric(context, global_object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* value;
- LOperand* temp;
- LOperand* context = UseRegister(instr->context());
- if (instr->NeedsWriteBarrier()) {
- value = UseTempRegister(instr->value());
- temp = TempRegister();
- } else {
- value = UseRegister(instr->value());
- temp = NULL;
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- ASSERT(instr->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* obj = UseFixed(instr->object(), edx);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(context, obj);
- return MarkAsCall(DefineFixed(result, eax), instr);
- } else {
- LOperand* context = UseAny(instr->context()); // Not actually used.
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(context, obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), edx);
- LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
- TempRegister())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = ExternalArrayOpRequiresTemp(
- instr->key()->representation(), elements_kind);
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
-
- if (!instr->is_external()) {
- LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
- } else {
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
- }
-
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- // An unsigned int array load might overflow and cause a deopt; make sure it
- // has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), edx);
- LOperand* key = UseFixed(instr->key(), ecx);
-
- LLoadKeyedGeneric* result =
- new(zone()) LLoadKeyedGeneric(context, object, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_external()) {
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- if (instr->value()->representation().IsDouble()) {
- LOperand* object = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-
- return new(zone()) LStoreKeyed(object, key, val);
- } else {
- ASSERT(instr->value()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
-
- LOperand* obj = UseRegister(instr->elements());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(obj, key, val);
- }
- }
-
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
-
- LOperand* external_pointer = UseRegister(instr->elements());
- // Determine whether the value needs a byte register in this case.
- bool val_is_fixed_register =
- elements_kind == EXTERNAL_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS;
-
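- // Byte-sized stores need a byte-addressable register (eax, ebx, ecx or
- // edx); fixing the value to eax guarantees that.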
- LOperand* val = val_is_fixed_register
- ? UseFixed(instr->value(), eax)
- : UseRegister(instr->value());
- bool clobbers_key = ExternalArrayOpRequiresTemp(
- instr->key()->representation(), elements_kind);
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(external_pointer,
- key,
- val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), edx);
- LOperand* key = UseFixed(instr->key(), ecx);
- LOperand* value = UseFixed(instr->value(), eax);
-
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
-
- LStoreKeyedGeneric* result =
- new(zone()) LStoreKeyedGeneric(context, object, key, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* new_map_reg = TempRegister();
- LOperand* temp_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL,
- new_map_reg, temp_reg);
- return result;
- } else if (FLAG_compiled_transitions) {
- LOperand* context = UseRegister(instr->context());
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
- return AssignPointerMap(result);
- } else {
- LOperand* object = UseFixed(instr->object(), eax);
- LOperand* fixed_object_reg = FixedTemp(edx);
- LOperand* new_map_reg = FixedTemp(ebx);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object,
- NULL,
- new_map_reg,
- fixed_object_reg);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = instr->is_in_object()
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
-
- // We only need a scratch register if we have a write barrier or we
- // store into the properties array (not an in-object property).
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
- needs_write_barrier_for_map) ? TempRegister() : NULL;
-
- // We need a temporary register for write barrier of the map field.
- LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->object(), edx);
- LOperand* value = UseFixed(instr->value(), eax);
-
- LStoreNamedGeneric* result =
- new(zone()) LStoreNamedGeneric(context, object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
- return MarkAsCall(DefineFixed(string_add, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LOperand* context = UseAny(instr->context());
- LStringCharCodeAt* result =
- new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LOperand* context = UseAny(instr->context());
- LStringCharFromCode* result =
- new(zone()) LStringCharFromCode(context, char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* temp = TempRegister();
- LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* size = UseTempRegister(instr->size());
- LOperand* temp = TempRegister();
- LAllocate* result = new(zone()) LAllocate(context, size, temp);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LFastLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LObjectLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseAtStart(instr->object());
- LOperand* key = UseOrConstantAtStart(instr->key());
- LDeleteProperty* result = new(zone()) LDeleteProperty(context, object, key);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- ASSERT(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- ASSERT(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
- Register reg = descriptor->register_params_[instr->index()];
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- argument_count_ -= instr->argument_count();
- LCallStub* result = new(zone()) LCallStub(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = Use(instr->index());
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), eax);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* value = UseAtStart(instr->value());
- LTypeof* result = new(zone()) LTypeof(context, value);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
-
- // If there is an instruction with a pending deoptimization environment,
- // create a lazy bailout instruction to capture the environment.
- if (!pending_deoptimization_ast_id_.IsNone()) {
- ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
- LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
- LInstruction* result = AssignEnvironment(lazy_bailout);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
- instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = BailoutId::None();
- return result;
- }
-
- return NULL;
-}
-
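The pending-deoptimization handshake above works roughly as follows (a sketch; step 1 happens in MarkAsCall, which is outside this excerpt, so its details are inferred from v8 sources of this era rather than quoted from the deleted file):

  // 1. MarkAsCall, for an instruction with observable side effects, records
  //    instruction_pending_deoptimization_environment_ = instr and the ast id
  //    of the HSimulate that immediately follows it.
  // 2. When DoSimulate later reaches that ast id, it emits an LLazyBailout,
  //    assigns it an environment, and hands that environment back via
  //    SetDeferredLazyDeoptimizationEnvironment, which only
  //    LInstanceOfKnownGlobal overrides (see lithium-ia32.h below).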
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- info()->MarkAsDeferredCalling();
- if (instr->is_function_entry()) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(new(zone()) LStackCheck(context), instr);
- } else {
- ASSERT(instr->is_backwards_branch());
- LOperand* context = UseAny(instr->context());
- return AssignEnvironment(
- AssignPointerMap(new(zone()) LStackCheck(context)));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
- }
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* key = UseOrConstantAtStart(instr->key());
- LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new(zone()) LIn(context, key, object);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object = UseFixed(instr->enumerable(), eax);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
- return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseTempRegister(instr->index());
- return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/lithium-ia32.h b/src/3rdparty/v8/src/ia32/lithium-ia32.h
deleted file mode 100644
index e6fd655..0000000
--- a/src/3rdparty/v8/src/ia32/lithium-ia32.h
+++ /dev/null
@@ -1,2849 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_LITHIUM_IA32_H_
-#define V8_IA32_LITHIUM_IA32_H_
-
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(Allocate) \
- V(AllocateObject) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(ArrayLiteral) \
- V(BitI) \
- V(BitNotI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallStub) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CmpIDAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(CmpConstantEqAndBranch) \
- V(ConstantD) \
- V(ConstantI) \
- V(ConstantT) \
- V(Context) \
- V(DeclareGlobals) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(DivI) \
- V(DoubleToI) \
- V(DummyUse) \
- V(ElementsKind) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(In) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(Uint32ToDouble) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
- V(IsObjectAndBranch) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyed) \
- V(LoadKeyedGeneric) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(MapEnumLength) \
- V(MathExp) \
- V(MathFloorOfDiv) \
- V(MathMinMax) \
- V(MathPowHalf) \
- V(MathRound) \
- V(ModI) \
- V(MulI) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(Random) \
- V(PushArgument) \
- V(RegExpLiteral) \
- V(Return) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyed) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(StringLength) \
- V(SubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(Throw) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop)
-
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
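For illustration, here is what the macro produces when instantiated as DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") inside LGoto further down; the expansion is mechanical and is shown only as a reading aid, not as text from the deleted file:

  virtual Opcode opcode() const { return LInstruction::kGoto; }
  virtual void CompileToNative(LCodeGen* generator);
  virtual const char* Mnemonic() const { return "goto"; }
  static LGoto* cast(LInstruction* instr) {
    ASSERT(instr->IsGoto());
    return reinterpret_cast<LGoto*>(instr);
  }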
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction: public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
- virtual ~LInstruction() { }
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
- void MarkAsCall() { is_call_ = true; }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return is_call_; }
- bool ClobbersRegisters() const { return is_call_; }
- virtual bool ClobbersDoubleRegisters() const {
- return is_call_ || !CpuFeatures::IsSupported(SSE2);
- }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- private:
- // Iterator support.
- friend class InputIterator;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- bool is_call_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
-
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
-};
-
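To make the R/I/T parameters concrete, a minimal hypothetical subclass is sketched below; LNegI does not exist in this file, and a real instruction would also need a V(NegI) entry in LITHIUM_CONCRETE_INSTRUCTION_LIST:

  // R = 1 result, I = 1 input, T = 0 temps.
  class LNegI: public LTemplateInstruction<1, 1, 0> {
   public:
    explicit LNegI(LOperand* value) {
      inputs_[0] = value;  // operands live in the embedded containers
    }
    LOperand* value() { return inputs_[0]; }
    DECLARE_CONCRETE_INSTRUCTION(NegI, "neg-i")
  };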
-
-class LGap: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block) : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
- static LGap* cast(LInstruction* instr) {
- ASSERT(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
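A usage sketch for the per-position move lists (gap, zone, from and to are placeholders, and AddMove is the LParallelMove interface from lithium.h as of this era, assumed here rather than quoted):

  LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START, zone);
  move->AddMove(from, to, zone);  // resolved later by LGapResolver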
-
-class LInstructionGap: public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
- virtual bool ClobbersDoubleRegisters() const { return false; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
-
- int block_id() const { return block_id_; }
-
- private:
- int block_id_;
-};
-
-
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-};
-
-
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-};
-
-
-class LLabel: public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- virtual void PrintDataTo(StringStream* stream);
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LCallStub: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallStub(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
-};
-
-
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
- virtual bool IsControl() const { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-};
-
-
-class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
- public:
- LWrapReceiver(LOperand* receiver,
- LOperand* function,
- LOperand* temp) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- temps_[0] = temp;
- }
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-};
-
-
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-};
-
-
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LModI: public LTemplateInstruction<1, 2, 1> {
- public:
- LModI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivI: public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-};
-
-
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
- public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI: public LTemplateInstruction<1, 2, 1> {
- public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
- public:
- LUnaryMathOperation(LOperand* context, LOperand* value) {
- inputs_[1] = context;
- inputs_[0] = value;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
-};
-
-
-class LMathExp: public LTemplateInstruction<1, 1, 2> {
- public:
- LMathExp(LOperand* value,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
- public:
- LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[1] = context;
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LMathRound: public LTemplateInstruction<1, 2, 1> {
- public:
- LMathRound(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[1] = context;
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
-};
-
-
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
- }
-
- LOperand* left() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
-class LIsNilAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsNilAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStringCompareAndBranch: public LControlInstruction<3, 0> {
- public:
- LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
- public:
- LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
- "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpT: public LTemplateInstruction<1, 3, 0> {
- public:
- LCmpT(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
- public:
- LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
- lazy_deopt_env_ = env;
- }
-
- private:
- LEnvironment* lazy_deopt_env_;
-};
-
-
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI: public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-
- Token::Value op() const { return hydrogen()->op(); }
-};
-
-
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- Token::Value op() const { return op_; }
- bool can_deopt() const { return can_deopt_; }
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LConstantD(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value() const { return hydrogen()->handle(); }
-};
-
-
-class LBranch: public LControlInstruction<1, 1> {
- public:
- LBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCmpMapAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
-};
-
-
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
- public:
- LValueOf(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
-class LDateField: public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index)
- : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- Smi* index() const { return index_; }
-
- private:
- Smi* index_;
-};
-
-
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
- public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
- inputs_[0] = string;
- inputs_[1] = index;
- inputs_[2] = value;
- }
-
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
-class LThrow: public LTemplateInstruction<0, 2, 0> {
- public:
- LThrow(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower: public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LRandom: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRandom(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return op_; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
- public:
- LArithmeticT(Token::Value op,
- LOperand* context,
- LOperand* left,
- LOperand* right)
- : op_(op) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- Token::Value op() const { return op_; }
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn: public LTemplateInstruction<0, 2, 0> {
- public:
- LReturn(LOperand* value, LOperand* context) {
- inputs_[0] = value;
- inputs_[1] = context;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadNamedFieldPolymorphic(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
- "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadNamedGeneric(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
- inputs_[0] = function;
- temps_[0] = temp;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-};
-
-
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- bool is_external() const {
- return hydrogen()->is_external();
- }
-
- virtual bool ClobbersDoubleRegisters() const {
- return !CpuFeatures::IsSupported(SSE2) &&
- !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
- bool key_is_smi() {
- return hydrogen()->key()->representation().IsTagged();
- }
-};
-
-
-inline static bool ExternalArrayOpRequiresTemp(
- Representation key_representation,
- ElementsKind elements_kind) {
- // Operations that require the key to be divided by two to be converted into
- // an index cannot fold the scale operation into a load and need an extra
- // temp register to do the work.
- return key_representation.IsTagged() &&
- (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS);
-}
-
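In concrete terms (a sketch; the real sequence lives in the deleted lithium-codegen-ia32.cc, and temp, key, result and external_pointer are placeholder registers): an ia32 smi stores its value shifted left by one, so a tagged key already equals 2 * index. That factor folds into the addressing-mode scale for 2-, 4- and 8-byte element kinds, but the byte-sized kinds listed above would need a scale of one half, which ia32 addressing cannot encode, hence the untag through a temp:

  mov(temp, key);  // key is a tagged smi: index << 1
  sar(temp, 1);    // temp = index
  movzx_b(result, Operand(external_pointer, temp, times_1, 0));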
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = obj;
- inputs_[2] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-};
-
-
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreGlobalCell(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = global_object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
-class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LDeclareGlobals(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
- public:
- LGlobalObject(LOperand* context, bool qml_global) {
- inputs_[0] = context;
- qml_global_ = qml_global;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
- bool qml_global() { return qml_global_; }
-
- private:
- bool qml_global_;
-};
-
-
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
-
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> function() { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
- public:
- LInvokeFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
-};
-
-
-class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
- public:
- LCallKeyed(LOperand* context, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNamed: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNamed(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction: public LTemplateInstruction<1, 2, 0> {
- public:
- LCallFunction(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
- public:
- LCallGlobal(LOperand* context, bool qml_global)
- : qml_global_(qml_global) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
-};
-
-
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return hydrogen()->target(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew: public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray: public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNewArray(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallRuntime(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
-};
-
-
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
- public:
- LUint32ToDouble(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagU(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
- public:
- LNumberTagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-// Sometimes truncating conversion from a double to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
- public:
- LDoubleToI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
- public:
- LTaggedToI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-};
-
-
-class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
- public:
- LNumberUntagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- bool needs_check() const { return needs_check_; }
-
- private:
- bool needs_check_;
-};
-
-
-class LStoreNamedField: public LTemplateInstruction<0, 2, 2> {
- public:
- LStoreNamedField(LOperand* obj,
- LOperand* val,
- LOperand* temp,
- LOperand* temp_map) {
- inputs_[0] = obj;
- inputs_[1] = val;
- temps_[0] = temp;
- temps_[1] = temp_map;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp_map() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
-};
-
-
-class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyed(LOperand* elements, LOperand* key, LOperand* val) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- bool is_external() const { return hydrogen()->is_external(); }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
- public:
- LStoreKeyedGeneric(LOperand* context,
- LOperand* object,
- LOperand* key,
- LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- inputs_[3] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* value() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LTransitionElementsKind: public LTemplateInstruction<0, 2, 2> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* context,
- LOperand* new_map_temp,
- LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = context;
- temps_[0] = new_map_temp;
- temps_[1] = temp;
- }
-
- LOperand* context() { return inputs_[1]; }
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
- LOperand* temp() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento: public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LStringAdd: public LTemplateInstruction<1, 3, 0> {
- public:
- LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
- public:
- LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
- inputs_[0] = context;
- inputs_[1] = string;
- inputs_[2] = index;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* string() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharFromCode(LOperand* context, LOperand* char_code) {
- inputs_[0] = context;
- inputs_[1] = char_code;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* char_code() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- LOperand* string() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckFunction(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
-};
-
-
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 1> {
- public:
- LCheckInstanceType(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LCheckPrototypeMaps(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- ZoneList<Handle<JSObject> >* prototypes() const {
- return hydrogen()->prototypes();
- }
- ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampDToUint8(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-};
-
-
-class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
- public:
- LAllocateObject(LOperand* context, LOperand* temp) {
- inputs_[0] = context;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LAllocate: public LTemplateInstruction<1, 2, 1> {
- public:
- LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = size;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* size() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFastLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArrayLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LObjectLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFunctionLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
-};
-
-
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTypeof: public LTemplateInstruction<1, 2, 0> {
- public:
- LTypeof(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LDeleteProperty: public LTemplateInstruction<1, 3, 0> {
- public:
- LDeleteProperty(LOperand* context, LOperand* obj, LOperand* key) {
- inputs_[0] = context;
- inputs_[1] = obj;
- inputs_[2] = key;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
- LOperand* double_register_spills_[
- DoubleRegister::kMaxNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStackCheck(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LIn: public LTemplateInstruction<1, 3, 0> {
- public:
- LIn(LOperand* context, LOperand* key, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = key;
- inputs_[2] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* object() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
- public:
- LForInPrepareMap(LOperand* context, LOperand* object) {
- inputs_[0] = context;
- inputs_[1] = object;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk: public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph),
- num_double_slots_(0) { }
-
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
-
- int num_double_slots() const { return num_double_slots_; }
-
- private:
- int num_double_slots_;
-};
-
-
-class LChunkBuilder BASE_EMBEDDED {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
- info_(info),
- graph_(graph),
- zone_(graph->zone()),
- status_(UNUSED),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
-
- private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(const char* reason);
-
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(XMMRegister reg);
- LUnallocated* ToUnallocated(X87TopOfStackRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- XMMRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // An operand created by UseRegister is guaranteed to be live until the end
- // of the instruction. This means that the register allocator will not
- // reuse its register for any other operand inside the instruction.
- // An operand created by UseRegisterAtStart is guaranteed to be live only
- // at instruction start. The register allocator is free to assign the same
- // register to some other operand used inside the instruction (i.e. a
- // temporary or the output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg);
- template<int I, int T>
- LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr);
- // Assigns an environment to an instruction. An instruction which can
- // deoptimize must have an environment.
- LInstruction* AssignEnvironment(LInstruction* instr);
- // Assigns a pointer map to an instruction. An instruction which can
- // trigger a GC or a lazy deoptimization must have a pointer map.
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // Marks a call for the register allocator. Assigns a pointer map to
- // support GC and lazy deoptimization. Assigns an environment to support
- // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
-
- void VisitInstruction(HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
-
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Zone* zone_;
- Status status_;
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- int argument_count_;
- LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_LITHIUM_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
deleted file mode 100644
index 587699f..0000000
--- a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
+++ /dev/null
@@ -1,3101 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "debug.h"
-#include "runtime.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// MacroAssembler implementation.
-
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
- : Assembler(arg_isolate, buffer, size),
- generating_stub_(false),
- allow_stub_calls_(true),
- has_frame_(false) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
- }
-}
-
-
-void MacroAssembler::InNewSpace(
- Register object,
- Register scratch,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- ASSERT(cc == equal || cc == not_equal);
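- // Clearing the page-offset bits of the object address yields the
- // MemoryChunk header; its flags word records which space owns the page.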
- if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
- } else {
- mov(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
- }
- // Check that we can use a test_b.
- ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
- ASSERT(MemoryChunk::IN_TO_SPACE < 8);
- int mask = (1 << MemoryChunk::IN_FROM_SPACE)
- | (1 << MemoryChunk::IN_TO_SPACE);
- // If non-zero, the page belongs to new-space.
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
- static_cast<uint8_t>(mask));
- j(cc, condition_met, condition_met_distance);
-}
-
-
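-// Record the address of a modified slot (|addr|) in the store buffer and
-// call the StoreBufferOverflowStub when the buffer runs full.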
-void MacroAssembler::RememberedSetHelper(
- Register object, // Only used for debug checks.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- MacroAssembler::RememberedSetFinalAction and_then) {
- Label done;
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
- // Load store buffer top.
- ExternalReference store_buffer =
- ExternalReference::store_buffer_top(isolate());
- mov(scratch, Operand::StaticVariable(store_buffer));
- // Store pointer to buffer.
- mov(Operand(scratch, 0), addr);
- // Increment buffer top.
- add(scratch, Immediate(kPointerSize));
- // Write back new top of buffer.
- mov(Operand::StaticVariable(store_buffer), scratch);
- // Check for the end of the buffer and call the stub on overflow.
- test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
- if (and_then == kReturnAtEnd) {
- Label buffer_overflowed;
- j(not_equal, &buffer_overflowed, Label::kNear);
- ret(0);
- bind(&buffer_overflowed);
- } else {
- ASSERT(and_then == kFallThroughAtEnd);
- j(equal, &done, Label::kNear);
- }
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
- CallStub(&store_buffer_overflow);
- if (and_then == kReturnAtEnd) {
- ret(0);
- } else {
- ASSERT(and_then == kFallThroughAtEnd);
- bind(&done);
- }
-}
-
-
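-// Clamp the double in |input_reg| to the range [0, 255] and leave the
-// result as an integer in |result_reg|. Out-of-range values saturate to 0
-// or 255, and NaN clamps to 0 via the conversion-failure path below.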
-void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
- XMMRegister scratch_reg,
- Register result_reg) {
- Label done;
- Label conv_failure;
- pxor(scratch_reg, scratch_reg);
- cvtsd2si(result_reg, input_reg);
- test(result_reg, Immediate(0xFFFFFF00));
- j(zero, &done, Label::kNear);
- cmp(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
- mov(result_reg, Immediate(0));
- setcc(above, result_reg);
- sub(result_reg, Immediate(1));
- and_(result_reg, Immediate(255));
- jmp(&done, Label::kNear);
- bind(&conv_failure);
- Set(result_reg, Immediate(0));
- ucomisd(input_reg, scratch_reg);
- j(below, &done, Label::kNear);
- Set(result_reg, Immediate(255));
- bind(&done);
-}
-
-
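-// Clamp the signed int32 in |reg| to the range [0, 255]. In-range values
-// fall through unchanged; negative values saturate to 0 and values above
-// 255 saturate to 255.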
-void MacroAssembler::ClampUint8(Register reg) {
- Label done;
- test(reg, Immediate(0xFFFFFF00));
- j(zero, &done, Label::kNear);
- setcc(negative, reg); // 1 if negative, 0 if positive.
- dec_b(reg); // 0 if negative, 255 if positive.
- bind(&done);
-}
-
-
-static double kUint32Bias =
- static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
-
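-// Convert the uint32 in |src| to a double in |dst|. cvtsi2sd interprets the
-// source as a signed int32, so when the top bit is set the result is 2^32
-// too small; add kUint32Bias to compensate.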
-void MacroAssembler::LoadUint32(XMMRegister dst,
- Register src,
- XMMRegister scratch) {
- Label done;
- cmp(src, Immediate(0));
- movdbl(scratch,
- Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
- cvtsi2sd(dst, src);
- j(not_sign, &done, Label::kNear);
- addsd(dst, scratch);
- bind(&done);
-}
-
-
-void MacroAssembler::RecordWriteArray(Register object,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
- test(value, Immediate(kSmiTagMask));
- j(zero, &done);
- }
-
- // Array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
- // into an array of words.
- Register dst = index;
- lea(dst, Operand(object, index, times_half_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
-
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(index, Immediate(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done, Label::kNear);
- }
-
- // Although the object register is tagged, the offset is relative to the
- // start of the object, so the offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
-
- lea(dst, FieldOperand(object, offset));
- if (emit_debug_code()) {
- Label ok;
- test_b(dst, (1 << kPointerSizeLog2) - 1);
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-void MacroAssembler::RecordWriteForMap(
- Register object,
- Handle<Map> map,
- Register scratch1,
- Register scratch2,
- SaveFPRegsMode save_fp) {
- Label done;
-
- Register address = scratch1;
- Register value = scratch2;
- if (emit_debug_code()) {
- Label ok;
- lea(address, FieldOperand(object, HeapObject::kMapOffset));
- test_b(address, (1 << kPointerSizeLog2) - 1);
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
- AssertNotSmi(object);
-
- if (!FLAG_incremental_marking) {
- return;
- }
-
- // A single check of the map's page's interesting flag suffices, since the
- // flag is only set during incremental collection, and then the flag on the
- // page of the object being written into is guaranteed to be set as well.
- // This optimization relies on the fact that maps can never be in new space.
- ASSERT(!isolate()->heap()->InNewSpace(*map));
- CheckPageFlagForMap(map,
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
-
- // Delay the initialization of |address| and |value| for the stub until it's
- // known that they will be needed. Up until this point their values are not
- // needed since they are embedded in the operands of instructions that need
- // them.
- lea(address, FieldOperand(object, HeapObject::kMapOffset));
- mov(value, Immediate(map));
- RecordWriteStub stub(object, value, address, OMIT_REMEMBERED_SET, save_fp);
- CallStub(&stub);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
- mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
- AssertNotSmi(object);
-
- if (remembered_set_action == OMIT_REMEMBERED_SET &&
- !FLAG_incremental_marking) {
- return;
- }
-
- if (emit_debug_code()) {
- Label ok;
- cmp(value, Operand(address, 0));
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis and stores into young gen.
- Label done;
-
- if (smi_check == INLINE_SMI_CHECK) {
- // Skip barrier if writing a smi.
- JumpIfSmi(value, &done, Label::kNear);
- }
-
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
-
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
- CallStub(&stub);
-
- bind(&done);
-
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(address, Immediate(BitCast<int32_t>(kZapValue)));
- mov(value, Immediate(BitCast<int32_t>(kZapValue)));
- }
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::DebugBreak() {
- Set(eax, Immediate(0));
- mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
- CEntryStub ces(1);
- call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
-}
-#endif
-
-
-void MacroAssembler::Set(Register dst, const Immediate& x) {
- if (x.is_zero()) {
- xor_(dst, dst); // Shorter than mov.
- } else {
- mov(dst, x);
- }
-}
-
-
-void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
- mov(dst, x);
-}
-
-
-bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
- static const int kMaxImmediateBits = 17;
- if (!RelocInfo::IsNone(x.rmode_)) return false;
- return !is_intn(x.x_, kMaxImmediateBits);
-}
-
-
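-// The Safe* variants XOR large user-controlled immediates with the
-// per-isolate JIT cookie and undo the XOR at run time, so the raw constant
-// never appears in generated code (a JIT-spraying mitigation).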
-void MacroAssembler::SafeSet(Register dst, const Immediate& x) {
- if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
- Set(dst, Immediate(x.x_ ^ jit_cookie()));
- xor_(dst, jit_cookie());
- } else {
- Set(dst, x);
- }
-}
-
-
-void MacroAssembler::SafePush(const Immediate& x) {
- if (IsUnsafeImmediate(x) && jit_cookie() != 0) {
- push(Immediate(x.x_ ^ jit_cookie()));
- xor_(Operand(esp, 0), Immediate(jit_cookie()));
- } else {
- push(x);
- }
-}
-
-
-void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- // see ROOT_ACCESSOR macro in factory.h
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
-}
-
-
-void MacroAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
- // see ROOT_ACCESSOR macro in factory.h
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
-}
-
-
-void MacroAssembler::CmpObjectType(Register heap_object,
- InstanceType type,
- Register map) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- CmpInstanceType(map, type);
-}
-
-
-void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
- cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
- static_cast<int8_t>(type));
-}
-
-
-void MacroAssembler::CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleyElementValue);
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleySmiElementValue);
- j(below_equal, fail, distance);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleyElementValue);
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Map::kMaximumBitField2FastHoleySmiElementValue);
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
- Register maybe_number,
- Register elements,
- Register key,
- Register scratch1,
- XMMRegister scratch2,
- Label* fail,
- bool specialize_for_processor,
- int elements_offset) {
- Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
- JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
- CheckMap(maybe_number,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
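- // FixedDoubleArray encodes the hole as one particular NaN bit pattern, so
- // any other NaN must be rewritten to the canonical non-hole NaN before it
- // is stored.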
- // Double value, canonicalize NaN.
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
- cmp(FieldOperand(maybe_number, offset),
- Immediate(kNaNOrInfinityLowerBoundUpper32));
- j(greater_equal, &maybe_nan, Label::kNear);
-
- bind(&not_nan);
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
- if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatures::Scope use_sse2(SSE2);
- movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset),
- scratch2);
- } else {
- fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- fstp_d(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset));
- }
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- j(greater, &is_nan, Label::kNear);
- cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
- j(zero, &not_nan);
- bind(&is_nan);
- if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatures::Scope use_sse2(SSE2);
- movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
- } else {
- fld_d(Operand::StaticVariable(canonical_nan_reference));
- }
- jmp(&have_double_value, Label::kNear);
-
- bind(&smi_value);
- // Value is a smi. Convert to a double and store.
- // Preserve original value.
- mov(scratch1, maybe_number);
- SmiUntag(scratch1);
- if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
- CpuFeatures::Scope fscope(SSE2);
- cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset),
- scratch2);
- } else {
- push(scratch1);
- fild_s(Operand(esp, 0));
- pop(scratch1);
- fstp_d(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset));
- }
- bind(&done);
-}
-
-
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
- cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
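- // The object's map may have been replaced by a more general fast-elements
- // transition of |map|, so accept any of those transitioned maps as well.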
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- j(equal, early_success, Label::kNear);
- cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(current_map));
- }
- }
- }
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- Label success;
- CompareMap(obj, map, &success, mode);
- j(not_equal, fail);
- bind(&success);
-}
-
-
-void MacroAssembler::DispatchMap(Register obj,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type) {
- Label fail;
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, &fail);
- }
- cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
- j(equal, success);
-
- bind(&fail);
-}
-
-
-Condition MacroAssembler::IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- test(instance_type, Immediate(kIsNotStringMask));
- return zero;
-}
-
-
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- cmp(scratch,
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- j(above, fail);
-}
-
-
-void MacroAssembler::FCmp() {
- if (CpuFeatures::IsSupported(CMOV)) {
- fucomip();
- fstp(0);
- } else {
- fucompp();
- push(eax);
- fnstsw_ax();
- sahf();
- pop(eax);
- }
-}
-
-
-void MacroAssembler::AssertNumber(Register object) {
- if (emit_debug_code()) {
- Label ok;
- JumpIfSmi(object, &ok);
- cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Check(equal, "Operand not a number");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
- test(object, Immediate(kSmiTagMask));
- Check(equal, "Operand is not a smi");
- }
-}
-
-
-void MacroAssembler::AssertString(Register object) {
- if (emit_debug_code()) {
- test(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi and not a string");
- push(object);
- mov(object, FieldOperand(object, HeapObject::kMapOffset));
- CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
- Check(below, "Operand is not a string");
- }
-}
-
-
-void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
- test(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi");
- }
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(ebp);
- mov(ebp, esp);
- push(esi);
- push(Immediate(Smi::FromInt(type)));
- push(Immediate(CodeObject()));
- if (emit_debug_code()) {
- cmp(Operand(esp, 0), Immediate(isolate()->factory()->undefined_value()));
- Check(not_equal, "code object not properly patched");
- }
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- if (emit_debug_code()) {
- cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(type)));
- Check(equal, "stack frame types must match");
- }
- leave();
-}
-
-
-void MacroAssembler::EnterExitFramePrologue() {
- // Set up the frame structure on the stack.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(ebp);
- mov(ebp, esp);
-
- // Reserve room for entry stack pointer and push the code object.
- ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
- push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
-
- // Save the frame pointer and the context in top.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
- isolate());
- ExternalReference context_address(Isolate::kContextAddress,
- isolate());
- mov(Operand::StaticVariable(c_entry_fp_address), ebp);
- mov(Operand::StaticVariable(context_address), esi);
-}
-
-
-void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
- // Optionally save all XMM registers.
- if (save_doubles) {
- CpuFeatures::Scope scope(SSE2);
- int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
- sub(esp, Immediate(space));
- const int offset = -2 * kPointerSize;
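- // The XMM save area starts just below the two slots pushed by
- // EnterExitFramePrologue (the saved entry sp and the code object).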
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
- }
- } else {
- sub(esp, Immediate(argc * kPointerSize));
- }
-
- // Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
- if (kFrameAlignment > 0) {
- ASSERT(IsPowerOf2(kFrameAlignment));
- and_(esp, -kFrameAlignment);
- }
-
- // Patch the saved entry sp.
- mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
-}
-
-
-void MacroAssembler::EnterExitFrame(bool save_doubles) {
- EnterExitFramePrologue();
-
- // Set up argc and argv in callee-saved registers.
- int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- mov(edi, eax);
- lea(esi, Operand(ebp, eax, times_4, offset));
-
- // Reserve space for argc, argv and isolate.
- EnterExitFrameEpilogue(3, save_doubles);
-}
-
-
-void MacroAssembler::EnterApiExitFrame(int argc) {
- EnterExitFramePrologue();
- EnterExitFrameEpilogue(argc, false);
-}
-
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
- // Optionally restore all XMM registers.
- if (save_doubles) {
- CpuFeatures::Scope scope(SSE2);
- const int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
- }
- }
-
- // Get the return address from the stack and restore the frame pointer.
- mov(ecx, Operand(ebp, 1 * kPointerSize));
- mov(ebp, Operand(ebp, 0 * kPointerSize));
-
- // Pop the arguments and the receiver from the caller stack.
- lea(esp, Operand(esi, 1 * kPointerSize));
-
- // Push the return address to get ready to return.
- push(ecx);
-
- LeaveExitFrameEpilogue();
-}
-
-
-void MacroAssembler::LeaveExitFrameEpilogue() {
- // Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Isolate::kContextAddress, isolate());
- mov(esi, Operand::StaticVariable(context_address));
-#ifdef DEBUG
- mov(Operand::StaticVariable(context_address), Immediate(0));
-#endif
-
- // Clear the top frame.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
- isolate());
- mov(Operand::StaticVariable(c_entry_fp_address), Immediate(0));
-}
-
-
-void MacroAssembler::LeaveApiExitFrame() {
- mov(esp, ebp);
- pop(ebp);
-
- LeaveExitFrameEpilogue();
-}
-
-
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // We will build up the handler from the bottom by pushing on the stack.
- // First push the frame pointer and context.
- if (kind == StackHandler::JS_ENTRY) {
- // The frame pointer does not point to a JS frame so we save NULL for
- // ebp. We expect the code throwing an exception to check ebp before
- // dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
- push(Immediate(Smi::FromInt(0))); // No context.
- } else {
- push(ebp);
- push(esi);
- }
- // Push the state and the code object.
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- push(Immediate(state));
- Push(CodeObject());
-
- // Link the current handler as the next handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- push(Operand::StaticVariable(handler_address));
- // Set this new handler as the current one.
- mov(Operand::StaticVariable(handler_address), esp);
-}
-
-
-void MacroAssembler::PopTryHandler() {
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- pop(Operand::StaticVariable(handler_address));
- add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-}
-
-
-void MacroAssembler::JumpToHandlerEntry() {
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- // eax = exception, edi = code object, edx = state.
- mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
- shr(edx, StackHandler::kKindWidth);
- mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
- SmiUntag(edx);
- lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
- jmp(edi);
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in eax.
- if (!value.is(eax)) {
- mov(eax, value);
- }
- // Drop the stack pointer to the top of the top handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- mov(esp, Operand::StaticVariable(handler_address));
- // Restore the next handler.
- pop(Operand::StaticVariable(handler_address));
-
- // Remove the code object and state, compute the handler address in edi.
- pop(edi); // Code object.
- pop(edx); // Index and state.
-
- // Restore the context and frame pointer.
- pop(esi); // Context.
- pop(ebp); // Frame pointer.
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == JS_ENTRY) == (ebp == 0) == (esi == 0), so we could test either
- // ebp or esi.
- Label skip;
- test(esi, esi);
- j(zero, &skip, Label::kNear);
- mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
- bind(&skip);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in eax.
- if (!value.is(eax)) {
- mov(eax, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- mov(esp, Operand::StaticVariable(handler_address));
-
- // Unwind the handlers until the top ENTRY handler is found.
- Label fetch_next, check_kind;
- jmp(&check_kind, Label::kNear);
- bind(&fetch_next);
- mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- test(Operand(esp, StackHandlerConstants::kStateOffset),
- Immediate(StackHandler::KindField::kMask));
- j(not_zero, &fetch_next);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- pop(Operand::StaticVariable(handler_address));
-
- // Remove the code object and state, compute the handler address in edi.
- pop(edi); // Code object.
- pop(edx); // Index and state.
-
- // Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(esi);
- pop(ebp);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- ASSERT(!holder_reg.is(scratch));
-
- // Load current lexical context from the stack frame.
- mov(scratch, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- // When generating debug code, make sure the lexical context is set.
- if (emit_debug_code()) {
- cmp(scratch, Immediate(0));
- Check(not_equal, "we should not have an empty lexical context");
- }
- // Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- mov(scratch, FieldOperand(scratch, offset));
- mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- push(scratch);
- // Read the first word and compare to native_context_map.
- mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- cmp(scratch, isolate()->factory()->native_context_map());
- Check(equal, "JSGlobalObject::native_context should be a native context.");
- pop(scratch);
- }
-
- // Check if both contexts are the same.
- cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- j(equal, &same_contexts);
-
- // Compare security tokens, save holder_reg on the stack so we can use it
- // as a temporary register.
- //
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
- push(holder_reg);
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- mov(holder_reg,
- FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- cmp(holder_reg, isolate()->factory()->null_value());
- Check(not_equal, "JSGlobalProxy::context() should not be null.");
-
- push(holder_reg);
- // Read the first word and compare to native_context_map(),
- mov(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- cmp(holder_reg, isolate()->factory()->native_context_map());
- Check(equal, "JSGlobalObject::native_context should be a native context.");
- pop(holder_reg);
- }
-
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
- mov(scratch, FieldOperand(scratch, token_offset));
- cmp(scratch, FieldOperand(holder_reg, token_offset));
- pop(holder_reg);
- j(not_equal, miss);
-
- bind(&same_contexts);
-}
-
-
-// Compute the hash code from the untagged key. This must be kept in sync
-// with ComputeIntegerHash in utils.h.
-//
-// Note: r0 will contain the hash code.
-void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
- // Xor original key with a seed.
- if (Serializer::enabled()) {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- mov(scratch, Immediate(Heap::kHashSeedRootIndex));
- mov(scratch,
- Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
- SmiUntag(scratch);
- xor_(r0, scratch);
- } else {
- int32_t seed = isolate()->heap()->HashSeed();
- xor_(r0, Immediate(seed));
- }
-
- // hash = ~hash + (hash << 15);
- mov(scratch, r0);
- not_(r0);
- shl(scratch, 15);
- add(r0, scratch);
- // hash = hash ^ (hash >> 12);
- mov(scratch, r0);
- shr(scratch, 12);
- xor_(r0, scratch);
- // hash = hash + (hash << 2);
- lea(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- mov(scratch, r0);
- shr(scratch, 4);
- xor_(r0, scratch);
- // hash = hash * 2057;
- imul(r0, r0, 2057);
- // hash = hash ^ (hash >> 16);
- mov(scratch, r0);
- shr(scratch, 16);
- xor_(r0, scratch);
-}
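-
-
-// A minimal C++ sketch of the mixing steps above (assuming a uint32_t key
-// and seed, as in ComputeIntegerHash in utils.h); the helper name is
-// illustrative only, not part of this file.
-static inline uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
-  uint32_t hash = key ^ seed;
-  hash = ~hash + (hash << 15);  // the mov/not/shl/add sequence
-  hash = hash ^ (hash >> 12);
-  hash = hash + (hash << 2);    // the lea with times_4
-  hash = hash ^ (hash >> 4);
-  hash = hash * 2057;           // the imul
-  hash = hash ^ (hash >> 16);
-  return hash;
-}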
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver and is unchanged.
- //
- // key - holds the smi key on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeds and we fall through.
-
- Label done;
-
- GetNumberHash(r0, r1);
-
- // Compute capacity mask.
- mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
- shr(r1, kSmiTagSize); // convert smi to int
- dec(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- mov(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(r2, r1);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- cmp(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
- j(equal, &done);
- } else {
- j(not_equal, miss);
- }
- }
-
- bind(&done);
- // Check that the value is a normal property.
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
- test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
- j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
-void MacroAssembler::LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Just return if allocation top is already known.
- if ((flags & RESULT_CONTAINS_TOP) != 0) {
- // No use of scratch if allocation top is provided.
- ASSERT(scratch.is(no_reg));
-#ifdef DEBUG
- // Assert that result actually contains top on entry.
- cmp(result, Operand::StaticVariable(new_space_allocation_top));
- Check(equal, "Unexpected allocation top");
-#endif
- return;
- }
-
- // Move address of new object to result. Use scratch register if available.
- if (scratch.is(no_reg)) {
- mov(result, Operand::StaticVariable(new_space_allocation_top));
- } else {
- mov(scratch, Immediate(new_space_allocation_top));
- mov(result, Operand(scratch, 0));
- }
-}
-
-
-void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch) {
- if (emit_debug_code()) {
- test(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, "Unaligned allocation in new space");
- }
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Update new top. Use scratch if available.
- if (scratch.is(no_reg)) {
- mov(Operand::StaticVariable(new_space_allocation_top), result_end);
- } else {
- mov(Operand(scratch, 0), result_end);
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- if (result_end.is_valid()) {
- mov(result_end, Immediate(0x7191));
- }
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- Register top_reg = result_end.is_valid() ? result_end : result;
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- if (!top_reg.is(result)) {
- mov(top_reg, result);
- }
- add(top_reg, Immediate(object_size));
- j(carry, gc_required);
- cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch);
-
- // Tag result if requested.
- bool tag_result = (flags & TAG_OBJECT) != 0;
- if (top_reg.is(result)) {
- if (tag_result) {
- sub(result, Immediate(object_size - kHeapObjectTag));
- } else {
- sub(result, Immediate(object_size));
- }
- } else if (tag_result) {
- ASSERT(kHeapObjectTag == 1);
- inc(result);
- }
-}
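-
-
-// Sketch of the DOUBLE_ALIGNMENT path above (assuming ia32's 4-byte
-// pointers, so kDoubleAlignment == 8; the helper name is illustrative):
-// a misaligned top gets a one-word filler so the object itself starts on
-// an 8-byte boundary.
-static inline Address AlignAllocationTopSketch(Address top,
-                                               Object* filler_map) {
-  if (reinterpret_cast<uintptr_t>(top) & kDoubleAlignmentMask) {
-    *reinterpret_cast<Object**>(top) = filler_map;  // GC-safe padding word
-    top += kDoubleSize / 2;                         // advance 4 bytes
-  }
-  return top;
-}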
-
-
-void MacroAssembler::AllocateInNewSpace(
- int header_size,
- ScaleFactor element_size,
- Register element_count,
- RegisterValueType element_count_type,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT((flags & SIZE_IN_WORDS) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- mov(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- // Register element_count is not modified by the function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- // We assume that element_count*element_size + header_size does not
- // overflow.
- if (element_count_type == REGISTER_VALUE_IS_SMI) {
- STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
- STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
- STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
- ASSERT(element_size >= times_2);
- ASSERT(kSmiTagSize == 1);
- element_size = static_cast<ScaleFactor>(element_size - 1);
- } else {
- ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
- }
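- // Worked example: a smi element count holds value << 1, so scaling it
- // with times_2 yields value * 4; stepping the ScaleFactor down by one
- // compensates for the tag bit without having to untag the count.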
- lea(result_end, Operand(element_count, element_size, header_size));
- add(result_end, result);
- j(carry, gc_required);
- cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required);
-
- if ((flags & TAG_OBJECT) != 0) {
- ASSERT(kHeapObjectTag == 1);
- inc(result);
- }
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-}
-
-
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- mov(result, Immediate(0x7091));
- mov(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- mov(scratch, Immediate(0x7291));
- }
- // object_size is left unchanged by this function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if ((flags & DOUBLE_ALIGNMENT) != 0) {
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- Label aligned;
- test(result, Immediate(kDoubleAlignmentMask));
- j(zero, &aligned, Label::kNear);
- mov(Operand(result, 0),
- Immediate(isolate()->factory()->one_pointer_filler_map()));
- add(result, Immediate(kDoubleSize / 2));
- bind(&aligned);
- }
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- if (!object_size.is(result_end)) {
- mov(result_end, object_size);
- }
- add(result_end, result);
- j(carry, gc_required);
- cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
- j(above, gc_required);
-
- // Tag result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- ASSERT(kHeapObjectTag == 1);
- inc(result);
- }
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-}
-
-
-void MacroAssembler::UndoAllocationInNewSpace(Register object) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
-#ifdef DEBUG
- cmp(object, Operand::StaticVariable(new_space_allocation_top));
- Check(below, "Undo allocation of non allocated memory");
-#endif
- mov(Operand::StaticVariable(new_space_allocation_top), object);
-}
-
-
-void MacroAssembler::AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->heap_number_map()));
-}
-
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- ASSERT(kShortSize == 2);
- // scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
-
- // Allocate two byte string in new space.
- AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- REGISTER_VALUE_IS_INT32,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->string_map()));
- mov(scratch1, length);
- SmiTag(scratch1);
- mov(FieldOperand(result, String::kLengthOffset), scratch1);
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- mov(scratch1, length);
- ASSERT(kCharSize == 1);
- add(scratch1, Immediate(kObjectAlignmentMask));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
-
- // Allocate ASCII string in new space.
- AllocateInNewSpace(SeqOneByteString::kHeaderSize,
- times_1,
- scratch1,
- REGISTER_VALUE_IS_INT32,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->ascii_string_map()));
- mov(scratch1, length);
- SmiTag(scratch1);
- mov(FieldOperand(result, String::kLengthOffset), scratch1);
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- int length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- ASSERT(length > 0);
-
- // Allocate ASCII string in new space.
- AllocateInNewSpace(SeqOneByteString::SizeFor(length),
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->ascii_string_map()));
- mov(FieldOperand(result, String::kLengthOffset),
- Immediate(Smi::FromInt(length)));
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->cons_string_map()));
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->cons_ascii_string_map()));
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->sliced_string_map()));
-}
-
-
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->sliced_ascii_string_map()));
-}
-
-
-// Copy memory, byte-by-byte, from source to destination. Not optimized for
-// long or aligned copies. The contents of scratch and length are destroyed.
-// Source and destination are incremented by length.
-// Many variants of movsb, loop unrolling, word moves, and indexed operands
-// have been tried here already, and this is fastest.
-// A simpler loop is faster on small copies, but 30% slower on large ones.
-// The cld() instruction, which clears the direction flag, must have been
-// emitted before calling this function.
-void MacroAssembler::CopyBytes(Register source,
- Register destination,
- Register length,
- Register scratch) {
- Label loop, done, short_string, short_loop;
- // Experimentation shows that the short string loop is faster if length < 10.
- cmp(length, Immediate(10));
- j(less_equal, &short_string);
-
- ASSERT(source.is(esi));
- ASSERT(destination.is(edi));
- ASSERT(length.is(ecx));
-
- // Because source is 4-byte aligned in our uses of this function,
- // we keep source aligned for the rep_movs call by copying the odd bytes
- // at the end of the ranges.
- mov(scratch, Operand(source, length, times_1, -4));
- mov(Operand(destination, length, times_1, -4), scratch);
- mov(scratch, ecx);
- shr(ecx, 2);
- rep_movs();
- and_(scratch, Immediate(0x3));
- add(destination, scratch);
- jmp(&done);
-
- bind(&short_string);
- test(length, length);
- j(zero, &done);
-
- bind(&short_loop);
- mov_b(scratch, Operand(source, 0));
- mov_b(Operand(destination, 0), scratch);
- inc(source);
- inc(destination);
- dec(length);
- j(not_zero, &short_loop);
-
- bind(&done);
-}
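-
-
-// C sketch of the fast path above (length > 10, source 4-byte aligned,
-// non-overlapping ranges; assumes <string.h> memcpy): the tail bytes are
-// copied first so rep_movs can then move whole 4-byte words.
-static void CopyBytesSketch(const uint8_t* src, uint8_t* dst, size_t length) {
-  memcpy(dst + length - 4, src + length - 4, 4);  // odd bytes at the end
-  memcpy(dst, src, (length / 4) * 4);             // rep_movs equivalent
-}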
-
-
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler) {
- Label loop, entry;
- jmp(&entry);
- bind(&loop);
- mov(Operand(start_offset, 0), filler);
- add(start_offset, Immediate(kPointerSize));
- bind(&entry);
- cmp(start_offset, end_offset);
- j(less, &loop);
-}
-
-
-void MacroAssembler::BooleanBitTest(Register object,
- int field_offset,
- int bit_index) {
- bit_index += kSmiTagSize + kSmiShiftSize;
- ASSERT(IsPowerOf2(kBitsPerByte));
- int byte_index = bit_index / kBitsPerByte;
- int byte_bit_index = bit_index & (kBitsPerByte - 1);
- test_b(FieldOperand(object, field_offset + byte_index),
- static_cast<byte>(1 << byte_bit_index));
-}
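-
-
-// Worked example for BooleanBitTest above (assuming kSmiTagSize == 1 and
-// kSmiShiftSize == 0 on ia32): asking for bit 9 of the smi-encoded field
-// becomes bit 10 overall, i.e. byte 1, bit 2, so the emitted test_b probes
-// field_offset + 1 with the mask 1 << 2.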
-
-
-void MacroAssembler::NegativeZeroTest(Register result,
- Register op,
- Label* then_label) {
- Label ok;
- test(result, result);
- j(not_zero, &ok);
- test(op, op);
- j(sign, then_label);
- bind(&ok);
-}
-
-
-void MacroAssembler::NegativeZeroTest(Register result,
- Register op1,
- Register op2,
- Register scratch,
- Label* then_label) {
- Label ok;
- test(result, result);
- j(not_zero, &ok);
- mov(scratch, op1);
- or_(scratch, op2);
- j(sign, then_label);
- bind(&ok);
-}
-
-
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss);
-
- if (miss_on_bound_function) {
- // If a bound function, go to miss label.
- mov(scratch,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
- SharedFunctionInfo::kBoundFunction);
- j(not_zero, miss);
- }
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
- test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance);
-
- // Get the prototype or initial map from the function.
- mov(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- cmp(result, Immediate(isolate()->factory()->the_hole_value()));
- j(equal, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CmpObjectType(result, MAP_TYPE, scratch);
- j(not_equal, &done);
-
- // Get the prototype from the initial map.
- mov(result, FieldOperand(result, Map::kPrototypeOffset));
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- mov(result, FieldOperand(result, Map::kConstructorOffset));
-
- // All done.
- bind(&done);
-}
-
-
-void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::StubReturn(int argc) {
- ASSERT(argc >= 1 && generating_stub());
- ret((argc - 1) * kPointerSize);
-}
-
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- add(esp, Immediate(num_arguments * kPointerSize));
- }
- mov(eax, Immediate(isolate()->factory()->undefined_value()));
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- and_(hash, String::kArrayIndexValueMask);
- STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
- if (String::kHashShift > kSmiTagSize) {
- shr(hash, String::kHashShift - kSmiTagSize);
- }
- if (!index.is(hash)) {
- mov(index, hash);
- }
-}
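-
-
-// Sketch of IndexFromHash above (assuming kSmiTag == 0, kSmiTagSize == 1):
-//   index = (hash & String::kArrayIndexValueMask) >> (String::kHashShift - 1)
-// The one shift bit held back leaves the value multiplied by two, which is
-// exactly the smi encoding, so no separate SmiTag step is needed.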
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(eax, Immediate(function->nargs));
- mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
- : kDontSaveFPRegs);
- CallStub(&ces);
-}
-
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
- // If the expected number of arguments of the runtime function is
- // constant, we check that the actual number of arguments match the
- // expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ExternalReference(f, isolate())));
- CEntryStub ces(1);
- CallStub(&ces);
-}
-
-
-void MacroAssembler::CallExternalReference(ExternalReference ref,
- int num_arguments) {
- mov(eax, Immediate(num_arguments));
- mov(ebx, Immediate(ref));
-
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(eax, Immediate(num_arguments));
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-// If true, a Handle<T> returned by value from a function with cdecl calling
-// convention is returned directly as the value of its location_ field in
-// register eax.
-// If false, it is returned as a pointer to a memory region preallocated by
-// the caller. A pointer to this region must be passed to the function as an
-// implicit first argument.
-#if defined(USING_BSD_ABI) || defined(__MINGW32__) || defined(__CYGWIN__)
-static const bool kReturnHandlesDirectly = true;
-#else
-static const bool kReturnHandlesDirectly = false;
-#endif
-
-
-Operand ApiParameterOperand(int index) {
- return Operand(
- esp, (index + (kReturnHandlesDirectly ? 0 : 1)) * kPointerSize);
-}
-
-
-void MacroAssembler::PrepareCallApiFunction(int argc) {
- if (kReturnHandlesDirectly) {
- EnterApiExitFrame(argc);
- // When handles are returned directly we don't have to allocate extra
- // space for and pass an out parameter.
- if (emit_debug_code()) {
- mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
- }
- } else {
- // We allocate two additional slots: return value and pointer to it.
- EnterApiExitFrame(argc + 2);
-
- // The argument slots are filled as follows:
- //
- // n + 1: output slot
- // n: arg n
- // ...
- // 1: arg1
- // 0: pointer to the output slot
-
- lea(esi, Operand(esp, (argc + 1) * kPointerSize));
- mov(Operand(esp, 0 * kPointerSize), esi);
- if (emit_debug_code()) {
- mov(Operand(esi, 0), Immediate(0));
- }
- }
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- int stack_space) {
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- ExternalReference limit_address =
- ExternalReference::handle_scope_limit_address(isolate());
- ExternalReference level_address =
- ExternalReference::handle_scope_level_address(isolate());
-
- // Allocate HandleScope in callee-save registers.
- mov(ebx, Operand::StaticVariable(next_address));
- mov(edi, Operand::StaticVariable(limit_address));
- add(Operand::StaticVariable(level_address), Immediate(1));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, eax);
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
- // Call the api function.
- call(function_address, RelocInfo::RUNTIME_ENTRY);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, eax);
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
- if (!kReturnHandlesDirectly) {
- // PrepareCallApiFunction saved the pointer to the output slot in the
- // callee-save register esi.
- mov(eax, Operand(esi, 0));
- }
-
- Label empty_handle;
- Label prologue;
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
-
- // Check if the result handle holds 0.
- test(eax, eax);
- j(zero, &empty_handle);
- // It was non-zero. Dereference to get the result value.
- mov(eax, Operand(eax, 0));
- bind(&prologue);
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- mov(Operand::StaticVariable(next_address), ebx);
- sub(Operand::StaticVariable(level_address), Immediate(1));
- Assert(above_equal, "Invalid HandleScope level");
- cmp(edi, Operand::StaticVariable(limit_address));
- j(not_equal, &delete_allocated_handles);
- bind(&leave_exit_frame);
-
- // Check if the function scheduled an exception.
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address(isolate());
- cmp(Operand::StaticVariable(scheduled_exception_address),
- Immediate(isolate()->factory()->the_hole_value()));
- j(not_equal, &promote_scheduled_exception);
-
-#if ENABLE_EXTRA_CHECKS
- // Check if the function returned a valid JavaScript value.
- Label ok;
- Register return_value = eax;
- Register map = ecx;
-
- JumpIfSmi(return_value, &ok, Label::kNear);
- mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
-
- CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- j(below, &ok, Label::kNear);
-
- CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- j(above_equal, &ok, Label::kNear);
-
- cmp(map, isolate()->factory()->heap_number_map());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->undefined_value());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->true_value());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->false_value());
- j(equal, &ok, Label::kNear);
-
- cmp(return_value, isolate()->factory()->null_value());
- j(equal, &ok, Label::kNear);
-
- Abort("API call returned invalid object");
-
- bind(&ok);
-#endif
-
- LeaveApiExitFrame();
- ret(stack_space * kPointerSize);
-
- bind(&empty_handle);
- // It was zero; the result is undefined.
- mov(eax, isolate()->factory()->undefined_value());
- jmp(&prologue);
-
- bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
-
- // HandleScope limit has changed. Delete allocated extensions.
- ExternalReference delete_extensions =
- ExternalReference::delete_handle_scope_extensions(isolate());
- bind(&delete_allocated_handles);
- mov(Operand::StaticVariable(limit_address), edi);
- mov(edi, eax);
- mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
- mov(eax, Immediate(delete_extensions));
- call(eax);
- mov(eax, edi);
- jmp(&leave_exit_frame);
-}
-
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
- // Set the entry point and jump to the C entry runtime stub.
- mov(ebx, Immediate(ext));
- CEntryStub ces(1);
- jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be ecx to
- // follow the calling convention which requires the call type to be
- // in ecx.
- ASSERT(dst.is(ecx));
- if (call_kind == CALL_AS_FUNCTION) {
- // Set to some non-zero smi by updating the least significant
- // byte.
- mov_b(dst, 1 << kSmiTagSize);
- } else {
- // Set to smi zero by clearing the register.
- xor_(dst, dst);
- }
-}
-
-
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- Label::Distance done_near,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- bool definitely_matches = false;
- *definitely_mismatches = false;
- Label invoke;
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- mov(eax, actual.immediate());
- const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
- if (expected.immediate() == sentinel) {
- // Don't worry about adapting arguments for builtins that
- // don't want that done. Skip adaptation code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- *definitely_mismatches = true;
- mov(ebx, expected.immediate());
- }
- }
- } else {
- if (actual.is_immediate()) {
- // Expected is in register, actual is immediate. This is the
- // case when we invoke function values without going through the
- // IC mechanism.
- cmp(expected.reg(), actual.immediate());
- j(equal, &invoke);
- ASSERT(expected.reg().is(ebx));
- mov(eax, actual.immediate());
- } else if (!expected.reg().is(actual.reg())) {
- // Both expected and actual are in (different) registers. This
- // is the case when we invoke functions using call and apply.
- cmp(expected.reg(), actual.reg());
- j(equal, &invoke);
- ASSERT(actual.reg().is(eax));
- ASSERT(expected.reg().is(ebx));
- }
- }
-
- if (!definitely_matches) {
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- mov(edx, Immediate(code_constant));
- add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_operand.is_reg(edx)) {
- mov(edx, code_operand);
- }
-
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
- SetCallKind(ecx, call_kind);
- call(adaptor, RelocInfo::CODE_TARGET);
- call_wrapper.AfterCall();
- if (!*definitely_mismatches) {
- jmp(done, done_near);
- }
- } else {
- SetCallKind(ecx, call_kind);
- jmp(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&invoke);
- }
-}
-
-
-void MacroAssembler::InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag, Label::kNear,
- call_wrapper, call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(ecx, call_kind);
- call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
- jmp(code);
- }
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- Operand dummy(eax, 0);
- bool definitely_mismatches = false;
- InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
- flag, Label::kNear, call_wrapper, call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code, rmode));
- SetCallKind(ecx, call_kind);
- call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
- jmp(code, rmode);
- }
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeFunction(Register fun,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- ASSERT(fun.is(edi));
- mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- SmiUntag(ebx);
-
- ParameterCount expected(ebx);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Get the function and setup the context.
- LoadHeapObject(edi, function);
- mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- ParameterCount expected(function->shared()->formal_parameter_count());
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- GetBuiltinFunction(edi, id);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, expected, flag, call_wrapper, CALL_AS_METHOD);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
- // Load the JavaScript builtin function from the builtins object.
- mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- mov(target, FieldOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(edi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(edi, id);
- // Load the code entry point from the function into the target register.
- mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- for (int i = 1; i < context_chain_length; i++) {
- mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- }
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context in esi).
- mov(dst, esi);
- }
-
- // We should not have found a with context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (emit_debug_code()) {
- cmp(FieldOperand(dst, HeapObject::kMapOffset),
- isolate()->factory()->with_context_map());
- Check(not_equal, "Variable resolved to with context.");
- }
-}
-
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- // Load the global or builtins object from the current context.
- mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check that the function's map is the same as the expected cached map.
- mov(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmp(map_in_out, FieldOperand(scratch, offset));
- j(not_equal, no_map_match);
-
- // Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- mov(map_in_out, FieldOperand(scratch, offset));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- mov(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
-void MacroAssembler::LoadGlobalContext(Register global_context) {
- // Load the global or builtins object from the current context.
- mov(global_context,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- mov(global_context,
- FieldOperand(global_context, GlobalObject::kNativeContextOffset));
-}
-
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- mov(function,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- mov(function,
- FieldOperand(function, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- mov(function, Operand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map) {
- // Load the initial map. The global functions all have initial maps.
- mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
- jmp(&ok);
- bind(&fail);
- Abort("Global functions must have initial map");
- bind(&ok);
- }
-}
-
-
-// Store the value in register src in the safepoint register stack
-// slot for register dst.
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
- mov(SafepointRegisterSlot(dst), src);
-}
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
- mov(SafepointRegisterSlot(dst), src);
-}
-
-
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- mov(dst, SafepointRegisterSlot(src));
-}
-
-
-Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
- return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
-}
-
-
-int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
- // The registers are pushed starting with the lowest encoding,
- // which means that lowest encodings are furthest away from
- // the stack pointer.
- ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
- return kNumSafepointRegisters - reg_code - 1;
-}
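-
-
-// Example (assuming kNumSafepointRegisters == 8, i.e. a pushad frame):
-// eax has code 0 and is pushed first, so it sits furthest from esp at
-// index 7, while edi (code 7) is at index 0.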
-
-
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- mov(result, Operand::Cell(cell));
- } else {
- mov(result, object);
- }
-}
-
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- push(Operand::Cell(cell));
- } else {
- Push(object);
- }
-}
-
-
-void MacroAssembler::Ret() {
- ret(0);
-}
-
-
-void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
- if (is_uint16(bytes_dropped)) {
- ret(bytes_dropped);
- } else {
- pop(scratch);
- add(esp, Immediate(bytes_dropped));
- push(scratch);
- ret(0);
- }
-}
-
-
-void MacroAssembler::Drop(int stack_elements) {
- if (stack_elements > 0) {
- add(esp, Immediate(stack_elements * kPointerSize));
- }
-}
-
-
-void MacroAssembler::Move(Register dst, Register src) {
- if (!dst.is(src)) {
- mov(dst, src);
- }
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand operand = Operand::StaticVariable(ExternalReference(counter));
- if (value == 1) {
- inc(operand);
- } else {
- add(operand, Immediate(value));
- }
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand operand = Operand::StaticVariable(ExternalReference(counter));
- if (value == 1) {
- dec(operand);
- } else {
- sub(operand, Immediate(value));
- }
- }
-}
-
-
-void MacroAssembler::IncrementCounter(Condition cc,
- StatsCounter* counter,
- int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Label skip;
- j(NegateCondition(cc), &skip);
- pushfd();
- IncrementCounter(counter, value);
- popfd();
- bind(&skip);
- }
-}
-
-
-void MacroAssembler::DecrementCounter(Condition cc,
- StatsCounter* counter,
- int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Label skip;
- j(NegateCondition(cc), &skip);
- pushfd();
- DecrementCounter(counter, value);
- popfd();
- bind(&skip);
- }
-}
-
-
-void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (emit_debug_code()) Check(cc, msg);
-}
-
-
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- Factory* factory = isolate()->factory();
- Label ok;
- cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(factory->fixed_array_map()));
- j(equal, &ok);
- cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(factory->fixed_double_array_map()));
- j(equal, &ok);
- cmp(FieldOperand(elements, HeapObject::kMapOffset),
- Immediate(factory->fixed_cow_array_map()));
- j(equal, &ok);
- Abort("JSObject with fast elements map has slow elements");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::Check(Condition cc, const char* msg) {
- Label L;
- j(cc, &L);
- Abort(msg);
- // will not return here
- bind(&L);
-}
-
-
-void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
- Label alignment_as_expected;
- test(esp, Immediate(frame_alignment_mask));
- j(zero, &alignment_as_expected);
- // Abort if stack is not aligned.
- int3();
- bind(&alignment_as_expected);
- }
-}
-
-
-void MacroAssembler::Abort(const char* msg) {
- // We want to pass the msg string like a smi to avoid GC
- // problems; however, msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
-#ifdef DEBUG
- if (msg != NULL) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-#endif
-
- push(eax);
- push(Immediate(p0));
- push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
- // Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
- } else {
- CallRuntime(Runtime::kAbort, 2);
- }
- // will not return here
- int3();
-}
-
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- mov(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
-}
-
-
-void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- mov(dst, FieldOperand(map, Map::kBitField3Offset));
- DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
-}
-
-
-void MacroAssembler::LoadPowerOf2(XMMRegister dst,
- Register scratch,
- int power) {
- ASSERT(is_uintn(power + HeapNumber::kExponentBias,
- HeapNumber::kExponentBits));
- mov(scratch, Immediate(power + HeapNumber::kExponentBias));
- movd(dst, scratch);
- psllq(dst, HeapNumber::kMantissaBits);
-}
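-
-
-// Sketch of the encoding above: an IEEE double with a zero mantissa and
-// biased exponent e represents 2^(e - kExponentBias), so movd places
-// power + kExponentBias in the low word and psllq shifts it up by the 52
-// mantissa bits into the exponent field.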
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label* failure) {
- if (!scratch.is(instance_type)) {
- mov(scratch, instance_type);
- }
- and_(scratch,
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
- j(not_equal, failure);
-}
-
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* failure) {
- // Check that both objects are not smis.
- STATIC_ASSERT(kSmiTag == 0);
- mov(scratch1, object1);
- and_(scratch1, object2);
- JumpIfSmi(scratch1, failure);
-
- // Load instance type for both strings.
- mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
- mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
- movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat ASCII strings.
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- // Interleave bits from both instance types and compare them in one check.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- and_(scratch1, kFlatAsciiStringMask);
- and_(scratch2, kFlatAsciiStringMask);
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
- j(not_equal, failure);
-}
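-
-
-// Sketch of the interleaved check above: with both instance types masked
-// to kFlatAsciiStringMask, the lea computes masked1 + masked2 * 8; the
-// ASSERT guarantees the mask and its << 3 image do not overlap, so one
-// compare against kFlatAsciiStringTag | (kFlatAsciiStringTag << 3) tests
-// both strings at once.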
-
-
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
- int frame_alignment = OS::ActivationFrameAlignment();
- if (frame_alignment != 0) {
- // Make stack end at alignment and make room for num_arguments words
- // and the original value of esp.
- mov(scratch, esp);
- sub(esp, Immediate((num_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frame_alignment));
- and_(esp, -frame_alignment);
- mov(Operand(esp, num_arguments * kPointerSize), scratch);
- } else {
- sub(esp, Immediate(num_arguments * kPointerSize));
- }
-}
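-
-
-// Worked example for PrepareCallCFunction (assuming 16-byte OS alignment):
-// for num_arguments == 2, esp drops by three words and is rounded down to
-// a 16-byte boundary; the saved esp lands in the word just above the two
-// argument slots, where CallCFunction's "mov esp, [esp + 2 * 4]" restores
-// it after the call.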
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- // Trashing eax is ok as it will be the return value.
- mov(eax, Immediate(function));
- CallCFunction(eax, num_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
- int num_arguments) {
- ASSERT(has_frame());
- // Check stack alignment.
- if (emit_debug_code()) {
- CheckStackAlignment();
- }
-
- call(function);
- if (OS::ActivationFrameAlignment() != 0) {
- mov(esp, Operand(esp, num_arguments * kPointerSize));
- } else {
- add(esp, Immediate(num_arguments * kPointerSize));
- }
-}
-
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
-}
-
-
-CodePatcher::CodePatcher(byte* address, int size)
- : address_(address),
- size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap in order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- CPU::FlushICache(address_, size_);
-
- // Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
- if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
- } else {
- mov(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
- }
- if (mask < (1 << kBitsPerByte)) {
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
- static_cast<uint8_t>(mask));
- } else {
- test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
- }
- j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::CheckPageFlagForMap(
- Handle<Map> map,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
- Page* page = Page::FromAddress(map->address());
- ExternalReference reference(ExternalReference::page_flags(page));
- // The inlined static address check of the page's flags relies
- // on maps never being compacted.
- ASSERT(!isolate()->heap()->mark_compact_collector()->
- IsOnEvacuationCandidate(*map));
- if (mask < (1 << kBitsPerByte)) {
- test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
- } else {
- test(Operand::StaticVariable(reference), Immediate(mask));
- }
- j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_near) {
- HasColor(object, scratch0, scratch1,
- on_black, on_black_near,
- 1, 0); // kBlackBitPattern.
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* has_color,
- Label::Distance has_color_distance,
- int first_bit,
- int second_bit) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
-
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- Label other_color, word_boundary;
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
- add(mask_scratch, mask_scratch); // Shift left 1 by adding.
- j(zero, &word_boundary, Label::kNear);
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
- jmp(&other_color, Label::kNear);
-
- bind(&word_boundary);
- test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
-
- j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
- bind(&other_color);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
- mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- and_(bitmap_reg, addr_reg);
- mov(ecx, addr_reg);
- int shift =
- Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
- shr(ecx, shift);
- and_(ecx,
- (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
-
- add(bitmap_reg, ecx);
- mov(ecx, addr_reg);
- shr(ecx, kPointerSizeLog2);
- and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
- mov(mask_reg, Immediate(1));
- shl_cl(mask_reg);
-}
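-
-
-// C sketch of the address arithmetic above (hypothetical helper; one mark
-// bit per pointer-size word, 32 bits per bitmap cell, and callers add the
-// MemoryChunk::kHeaderSize displacement when dereferencing):
-static void GetMarkBitsSketch(uintptr_t addr, uintptr_t* cell,
-                              uint32_t* mask) {
-  uintptr_t page = addr & ~Page::kPageAlignmentMask;
-  uintptr_t offset = addr & Page::kPageAlignmentMask;
-  *cell = page + (offset >> (Bitmap::kBitsPerCellLog2 + kPointerSizeLog2)) *
-                     Bitmap::kBytesPerCell;
-  *mask = 1u << ((addr >> kPointerSizeLog2) & (Bitmap::kBitsPerCell - 1));
-}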
-
-
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label done;
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(not_zero, &done, Label::kNear);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- push(mask_scratch);
- // Shift left by one (via add). May overflow, making the check conservative.
- add(mask_scratch, mask_scratch);
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = ecx; // Holds map while checking type.
- Register length = ecx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- mov(map, FieldOperand(value, HeapObject::kMapOffset));
- cmp(map, FACTORY->heap_number_map());
- j(not_equal, &not_heap_number, Label::kNear);
- mov(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons or sliced string, then it's an
- // object containing no GC pointers.
- Register instance_type = ecx;
- movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- test_b(instance_type, kExternalStringTag);
- j(zero, &not_external, Label::kNear);
- mov(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either ASCII or UC16.
- ASSERT(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- add(length, Immediate(0x04));
- // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
- // by 2. If we multiply the string length as smi by this, it still
- // won't overflow a 32-bit value.
- ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
- ASSERT(SeqOneByteString::kMaxSize <=
- static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, 2 + kSmiTagSize + kSmiShiftSize);
- add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
- length);
- if (emit_debug_code()) {
- mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
- Check(less_equal, "Live Bytes Count overflow chunk size");
- }
-
- bind(&done);
-}
-
-
-void MacroAssembler::EnumLength(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- mov(dst, FieldOperand(map, Map::kBitField3Offset));
- and_(dst, Immediate(Smi::FromInt(Map::EnumLengthBits::kMask)));
-}
-
-
-void MacroAssembler::CheckEnumCache(Label* call_runtime) {
- Label next, start;
- mov(ecx, eax);
-
- // Check if the enum length field is properly initialized, indicating that
- // there is an enum cache.
- mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-
- EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
- j(equal, call_runtime);
-
- jmp(&start);
-
- bind(&next);
- mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-
- // For all objects but the receiver, check that the cache is empty.
- EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::FromInt(0)));
- j(not_equal, call_runtime);
-
- bind(&start);
-
- // Check that there are no elements. Register ecx contains the current JS
- // object we've reached through the prototype chain.
- mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
- cmp(ecx, isolate()->factory()->empty_fixed_array());
- j(not_equal, call_runtime);
-
- mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- cmp(ecx, isolate()->factory()->null_value());
- j(not_equal, &next);
-}
-
-
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
- Register receiver_reg,
- Register scratch_reg) {
- Label no_info_available;
-
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- lea(scratch_reg, Operand(receiver_reg,
- JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
- cmp(scratch_reg, Immediate(new_space_start));
- j(less, &no_info_available);
- cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- j(greater, &no_info_available);
- cmp(MemOperand(scratch_reg, -AllocationSiteInfo::kSize),
- Immediate(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
- bind(&no_info_available);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
deleted file mode 100644
index 3a6e17b..0000000
--- a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
+++ /dev/null
@@ -1,1018 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
-#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
-
-#include "assembler.h"
-#include "frames.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-// Convenience for platform-independent signatures. We do not normally
-// distinguish memory operands from other operands on ia32.
-typedef Operand MemOperand;
-
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-
-
-enum RegisterValueType {
- REGISTER_VALUE_IS_SMI,
- REGISTER_VALUE_IS_INT32
-};
-
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
- public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
-
- // ---------------------------------------------------------------------------
- // GC Support
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- void CheckPageFlagForMap(
- Handle<Map> map,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, zero, branch, distance);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, not_zero, branch, distance);
- }
-
- // Check if an object has a given incremental marking color. Also uses ecx!
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- Label::Distance has_color_distance,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_distance = Label::kFar);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // As above, but the offset has the tag presubtracted. For use with
- // Operand(reg, off).
- void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- save_fp,
- remembered_set_action,
- smi_check);
- }
-
- // Notify the garbage collector that we wrote a pointer into a fixed array.
- // |array| is the array being stored into, |value| is the
- // object being stored. |index| is the array index represented as a
- // Smi. All registers are clobbered by the operation RecordWriteArray
- // filters out smis so it does not update the write barrier if the
- // value is a smi.
- void RecordWriteArray(
- Register array,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // For page containing |object| mark region covering |address|
- // dirty. |object| is the object being stored into, |value| is the
- // object being stored. The address and value registers are clobbered by the
- // operation. RecordWrite filters out smis so it does not update the
- // write barrier if the value is a smi.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // For page containing |object| mark the region covering the object's map
- // dirty. |object| is the object being stored into, |map| is the Map object
- // that was stored.
- void RecordWriteForMap(
- Register object,
- Handle<Map> map,
- Register scratch1,
- Register scratch2,
- SaveFPRegsMode save_fp);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
-#endif
-
- // Enter specific kind of exit frame. Expects the number of
- // arguments in register eax and sets up the number of arguments in
- // register edi and the pointer to the first argument in register
- // esi.
- void EnterExitFrame(bool save_doubles);
-
- void EnterApiExitFrame(int argc);
-
- // Leave the current exit frame. Expects the return value in
- // register eax:edx (untouched) and the pointer to the first
- // argument in register esi.
- void LeaveExitFrame(bool save_doubles);
-
- // Leave the current exit frame. Expects the return value in
- // register eax (untouched).
- void LeaveApiExitFrame();
-
- // Find the function context up the context chain.
- void LoadContext(Register dst, int context_chain_length);
-
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
- void LoadGlobalContext(Register global_context);
-
- // Load the global function with the given index.
- void LoadGlobalFunction(int index, Register function);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same.
- void LoadGlobalFunctionInitialMap(Register function, Register map);
-
- // Push and pop the registers that can hold pointers.
- void PushSafepointRegisters() { pushad(); }
- void PopSafepointRegisters() { popad(); }
- // Store the value in register/immediate src in the safepoint
- // register stack slot for register dst.
- void StoreToSafepointRegisterSlot(Register dst, Register src);
- void StoreToSafepointRegisterSlot(Register dst, Immediate src);
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
-
- void LoadHeapObject(Register result, Handle<HeapObject> object);
- void PushHeapObject(Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Set(result, Immediate(object));
- }
- }
-
- // ---------------------------------------------------------------------------
- // JavaScript invokes
-
- // Set up call kind marking in ecx. The method takes ecx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
- }
-
- void InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
- // Expression support
- void Set(Register dst, const Immediate& x);
- void Set(const Operand& dst, const Immediate& x);
-
- // Support for constant splitting.
- bool IsUnsafeImmediate(const Immediate& x);
- void SafeSet(Register dst, const Immediate& x);
- void SafePush(const Immediate& x);
-
- // Compare against a known root, e.g. undefined, null, true, ...
- void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
-
- // Compare object type for heap object.
- // Incoming register is heap_object and outgoing register is map.
- void CmpObjectType(Register heap_object, InstanceType type, Register map);
-
- // Compare instance type for map.
- void CmpInstanceType(Register map, InstanceType type);
-
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements, otherwise jump to fail.
- void StoreNumberToDoubleElements(Register maybe_number,
- Register elements,
- Register key,
- Register scratch1,
- XMMRegister scratch2,
- Label* fail,
- bool specialize_for_processor,
- int offset = 0);
-
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
- // result of map compare. If multiple map compares are required, the compare
- // sequences branches to early_success.
- void CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // Check if the map of an object is equal to a specified map and branch to a
- // specified target if equal. Skip the smi check if not required (object is
- // known to be a heap object)
- void DispatchMap(Register obj,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type);
-
- // Check if the object in register heap_object is a string. Afterwards the
- // register map contains the object map and the register instance_type
- // contains the instance_type. The registers map and instance_type can be the
- // same in which case it contains the instance type afterwards. Either of the
- // registers map and instance_type can be the same as heap_object.
- Condition IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type);
-
- // Check if a heap object's type is in the JSObject range, not including
- // JSFunction. The object's map will be loaded in the map register.
- // Any or all of the three registers may be the same.
- // The contents of the scratch register will always be overwritten.
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- // The contents of the scratch register will be overwritten.
- void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
-
- // FCmp is similar to integer cmp, but requires unsigned
- // jcc instructions (je, ja, jae, jb, jbe, and jz).
- void FCmp();
-
- void ClampUint8(Register reg);
-
- void ClampDoubleToUint8(XMMRegister input_reg,
- XMMRegister scratch_reg,
- Register result_reg);
-
-
- // Smi tagging support.
- void SmiTag(Register reg) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- add(reg, reg);
- }
- void SmiUntag(Register reg) {
- sar(reg, kSmiTagSize);
- }
-
- // Modifies the register even if it does not contain a Smi!
- void SmiUntag(Register reg, Label* is_smi) {
- STATIC_ASSERT(kSmiTagSize == 1);
- sar(reg, kSmiTagSize);
- STATIC_ASSERT(kSmiTag == 0);
- j(not_carry, is_smi);
- }
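
A standalone sketch of the smi representation these helpers assume: a smi is the 31-bit payload shifted left by one with tag bit zero, so tagging is an add of the register to itself and untagging is an arithmetic shift right (whose shifted-out bit lands in the carry flag, which the Label variant tests via not_carry):

#include <cassert>
#include <cstdint>

int32_t SmiTag(int32_t value) { return value << 1; }  // add reg, reg
int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // sar reg, kSmiTagSize

bool IsSmi(int32_t word) { return (word & 1) == 0; }  // test word, kSmiTagMask

int main() {
  assert(SmiUntag(SmiTag(42)) == 42);
  assert(SmiUntag(SmiTag(-7)) == -7);
  assert(IsSmi(SmiTag(123)) && !IsSmi(123 | 1));
  return 0;
}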
-
- void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
-
- // Jump if the register contains a smi.
- inline void JumpIfSmi(Register value,
- Label* smi_label,
- Label::Distance distance = Label::kFar) {
- test(value, Immediate(kSmiTagMask));
- j(zero, smi_label, distance);
- }
- // Jump if the operand is a smi.
- inline void JumpIfSmi(Operand value,
- Label* smi_label,
- Label::Distance distance = Label::kFar) {
- test(value, Immediate(kSmiTagMask));
- j(zero, smi_label, distance);
- }
- // Jump if the register contains a non-smi.
- inline void JumpIfNotSmi(Register value,
- Label* not_smi_label,
- Label::Distance distance = Label::kFar) {
- test(value, Immediate(kSmiTagMask));
- j(not_zero, not_smi_label, distance);
- }
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void EnumLength(Register dst, Register map);
- void NumberOfOwnDescriptors(Register dst, Register map);
-
- template<typename Field>
- void DecodeField(Register reg) {
- static const int shift = Field::kShift;
- static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
- sar(reg, shift);
- and_(reg, Immediate(mask));
- }
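
What DecodeField<Field> leaves in the register, in scalar form: the input is a smi-tagged word, and because the shift is arithmetic and the mask is pre-shifted by kSmiTagSize, the decoded field comes out still smi-tagged. A sketch with a hypothetical three-bit field at shift 3:

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;
const int kFieldShift = 3;                        // hypothetical field layout
const uint32_t kFieldMask = 0x7u << kFieldShift;

uint32_t DecodeFieldAsSmi(uint32_t smi_word) {
  uint32_t mask = (kFieldMask >> kFieldShift) << kSmiTagSize;
  return (smi_word >> kFieldShift) & mask;  // sar + and
}

int main() {
  uint32_t raw = 5u << kFieldShift;        // field value 5, other bits clear
  uint32_t smi_word = raw << kSmiTagSize;  // smi-tag the whole word
  assert(DecodeFieldAsSmi(smi_word) == (5u << kSmiTagSize));  // smi of 5
  return 0;
}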
- void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
-
- // Abort execution if argument is not a number, enabled via --debug-code.
- void AssertNumber(Register object);
-
- // Abort execution if argument is not a smi, enabled via --debug-code.
- void AssertSmi(Register object);
-
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
-
- // Abort execution if argument is not a string, enabled via --debug-code.
- void AssertString(Register object);
-
- // ---------------------------------------------------------------------------
- // Exception handling
-
- // Push a new try handler and link it into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
-
- // Unlink the stack handler on top of the stack from the try handler chain.
- void PopTryHandler();
-
- // Throw to the top handler in the try handler chain.
- void Throw(Register value);
-
- // Throw past all JS frames to the top JS entry frame.
- void ThrowUncatchable(Register value);
-
- // ---------------------------------------------------------------------------
- // Inline caching support
-
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, but the scratch register is clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
- void GetNumberHash(Register r0, Register scratch);
-
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result);
-
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space. If the new space is exhausted control
- // continues at the gc_required label. The allocated object is returned in
- // result and end of the new object is returned in result_end. The register
- // scratch can be passed as no_reg in which case an additional object
- // reference will be added to the reloc info. The returned pointers in result
- // and result_end have not yet been tagged as heap objects. If
- // result_contains_top_on_entry is true the content of result is known to be
- // the allocation top on entry (could be result_end from a previous call to
- // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
- // should be no_reg as it is never used.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- RegisterValueType element_count_type,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. Make sure that no pointers are left to the
- // object(s) no longer allocated as they would be invalid when allocation is
- // undone.
- void UndoAllocationInNewSpace(Register object);
-
- // Allocate a heap number in new space with undefined value. The
- // register scratch2 can be passed as no_reg; the others must be
- // valid registers. Returns tagged pointer in result register, or
- // jumps to gc_required if new space is full.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocate a sequential string. All the header fields of the string object
- // are initialized.
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- int length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocate a raw cons string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocate a raw sliced string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Copy memory, byte-by-byte, from source to destination. Not optimized for
- // long or aligned copies.
- // The contents of index and scratch are destroyed.
- void CopyBytes(Register source,
- Register destination,
- Register length,
- Register scratch);
-
- // Initialize fields with filler values. Fields from |start_offset| up to
- // but not including |end_offset| are overwritten with the value in |filler|.
- // At the end of the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
-
- // ---------------------------------------------------------------------------
- // Support functions.
-
- // Check a boolean-bit of a Smi field.
- void BooleanBitTest(Register object, int field_offset, int bit_index);
-
- // Check if result is zero and op is negative.
- void NegativeZeroTest(Register result, Register op, Label* then_label);
-
- // Check if result is zero and any of op1 and op2 are negative.
- // Register scratch is destroyed, and it must be different from op2.
- void NegativeZeroTest(Register result, Register op1, Register op2,
- Register scratch, Label* then_label);
-
- // Try to get the function prototype of a function and put the value in
- // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
- // clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function = false);
-
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
- // ---------------------------------------------------------------------------
- // Runtime calls
-
- // Call a code stub. Generate the code if necessary.
- void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // Tail call a code stub (jump). Generate the code if necessary.
- void TailCallStub(CodeStub* stub);
-
- // Return from a code stub after popping its arguments.
- void StubReturn(int argc);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
-
- // Convenience function: call an external reference.
- void CallExternalReference(ExternalReference ref, int num_arguments);
-
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- // Before calling a C-function from generated code, align arguments on stack.
- // After aligning the frame, arguments must be stored in esp[0], esp[4],
- // etc., not pushed. The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_arguments, Register scratch);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
-
- // Prepares stack to put arguments (aligns and so on). Reserves
- // space for return value if needed (assumes the return value is a handle).
- // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
- // etc. Saves context (esi). If space was reserved for return value then
- // stores the pointer to the reserved slot into esi.
- void PrepareCallApiFunction(int argc);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Clobbers ebx, edi and
- // caller-save registers. Restores context. On return removes
- // stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address, int stack_space);
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext);
-
- // ---------------------------------------------------------------------------
- // Utilities
-
- void Ret();
-
- // Return and drop arguments from stack, where the number of arguments
- // may be bigger than 2^16 - 1. Requires a scratch register.
- void Ret(int bytes_dropped, Register scratch);
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the esp register.
- void Drop(int element_count);
-
- void Call(Label* target) { call(target); }
-
- // Emit call to the code we are currently generating.
- void CallSelf() {
- Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
- call(self, RelocInfo::CODE_TARGET);
- }
-
- // Move if the registers are not identical.
- void Move(Register target, Register source);
-
- // Push a handle value.
- void Push(Handle<Object> handle) { push(Immediate(handle)); }
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
-
- Handle<Object> CodeObject() {
- ASSERT(!code_object_.is_null());
- return code_object_;
- }
-
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value);
- void IncrementCounter(StatsCounter* counter, int value);
- void DecrementCounter(StatsCounter* counter, int value);
- void IncrementCounter(Condition cc, StatsCounter* counter, int value);
- void DecrementCounter(Condition cc, StatsCounter* counter, int value);
-
-
- // ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, const char* msg);
-
- void AssertFastElements(Register elements);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg);
-
- // Print a message to stdout and abort execution.
- void Abort(const char* msg);
-
- // Check that the stack is aligned.
- void CheckStackAlignment();
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
- // ---------------------------------------------------------------------------
- // String utilities.
-
- // Check whether the instance type represents a flat ASCII string. Jump to the
- // label if not. If the instance type can be scratched specify same register
- // for both instance type and scratch.
- void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
- Register scratch,
- Label* on_not_flat_ascii_string);
-
- // Checks if both objects are sequential ASCII strings, and jumps to label
- // if either is not.
- void JumpIfNotBothSequentialAsciiStrings(Register object1,
- Register object2,
- Register scratch1,
- Register scratch2,
- Label* on_not_flat_ascii_strings);
-
- static int SafepointRegisterStackIndex(Register reg) {
- return SafepointRegisterStackIndex(reg.code());
- }
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- // Expects object in eax and returns map with validated enum cache
- // in eax. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Label* call_runtime);
-
- // AllocationSiteInfo support. Arrays may have an associated
- // AllocationSiteInfo object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver_reg should point to the array object.
- // scratch_reg gets clobbered.
- // If allocation info is present, the condition code is set to equal.
- void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
- Register scratch_reg);
-
- private:
- bool generating_stub_;
- bool allow_stub_calls_;
- bool has_frame_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // Helper functions for generating invokes.
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- Label::Distance done_distance,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
-
- void EnterExitFramePrologue();
- void EnterExitFrameEpilogue(int argc, bool save_doubles);
-
- void LeaveExitFrameEpilogue();
-
- // Allocation support helpers.
- void LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags);
- void UpdateAllocationTopHelper(Register result_end, Register scratch);
-
- // Helper for PopHandleScope. Allowed to perform a GC and returns
- // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
- // possibly returns a failure object indicating an allocation failure.
- MUST_USE_RESULT MaybeObject* PopHandleScopeHelper(Register saved,
- Register scratch,
- bool gc_allowed);
-
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- // Helper for finding the mark bits for an address. Afterwards, the
- // bitmap register points at the word with the mark bits and the mask
- // the position of the first bit. Uses ecx as scratch and leaves addr_reg
- // unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
- // Compute memory operands for safepoint stack slots.
- Operand SafepointRegisterSlot(Register reg);
- static int SafepointRegisterStackIndex(int reg_code);
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class StandardFrame;
-};
-
-
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
- // the exact number of bytes specified must be emitted. It is not legal to
- // emit relocation information. If either of these constraints is violated,
- // an assertion fails.
-class CodePatcher {
- public:
- CodePatcher(byte* address, int size);
- virtual ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
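
A hypothetical usage sketch (address and byte count invented for illustration); the patcher checks that exactly |size| bytes are emitted before it is destroyed:

// Overwrite two bytes at |address| with int3 + nop (one byte each).
void PatchWithBreakpoint(byte* address) {
  CodePatcher patcher(address, 2);
  patcher.masm()->int3();  // 0xCC
  patcher.masm()->nop();   // 0x90
}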
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-// Generate an Operand for loading a field from an object.
-inline Operand FieldOperand(Register object, int offset) {
- return Operand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
- int offset) {
- return Operand(object, index, scale, offset - kHeapObjectTag);
-}
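
The -kHeapObjectTag in both helpers folds pointer untagging into the addressing mode: tagged heap pointers have the low bit set, so [reg + offset - 1] addresses the same byte that untagging first would reach. A standalone sketch of the equivalence:

#include <cassert>
#include <cstdint>

const intptr_t kHeapObjectTag = 1;

// FieldOperand(obj, offset) encodes [obj + offset - kHeapObjectTag].
intptr_t FieldAddress(intptr_t tagged, int offset) {
  return tagged + offset - kHeapObjectTag;
}

int main() {
  intptr_t object = 0x1000;                   // untagged object start
  intptr_t tagged = object + kHeapObjectTag;  // as held in a register
  assert(FieldAddress(tagged, 4) == object + 4);
  return 0;
}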
-
-
-inline Operand ContextOperand(Register context, int index) {
- return Operand(context, Context::SlotOffset(index));
-}
-
-
-inline Operand GlobalObjectOperand() {
- return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
-}
-
-static inline Operand QmlGlobalObjectOperand() {
- return ContextOperand(esi, Context::QML_GLOBAL_OBJECT_INDEX);
-}
-
-// Generates an Operand for saving parameters after PrepareCallApiFunction.
-Operand ApiParameterOperand(int index);
-
-
-#ifdef GENERATED_CODE_COVERAGE
-extern void LogGeneratedCodeCoverage(const char* file_line);
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) { \
- byte* ia32_coverage_function = \
- reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
- masm->pushfd(); \
- masm->pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY); \
- masm->pop(eax); \
- masm->popad(); \
- masm->popfd(); \
- } \
- masm->
-#else
-#define ACCESS_MASM(masm) masm->
-#endif
-
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc b/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc
deleted file mode 100644
index 49c75e1..0000000
--- a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ /dev/null
@@ -1,1420 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "unicode.h"
-#include "log.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "ia32/regexp-macro-assembler-ia32.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-/*
- * This assembler uses the following register assignment convention
- * - edx : Current character. Must be loaded using LoadCurrentCharacter
- * before using any of the dispatch methods. Temporarily stores the
- * index of capture start after a matching pass for a global regexp.
- * - edi : Current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character offset!
- * - esi : end of input (points to byte after last character in input).
- * - ebp : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - esp : Points to tip of C stack.
- * - ecx : Points to tip of backtrack stack
- *
- * The registers eax and ebx are free to use for computations.
- *
- * Each call to a public method should retain this convention.
- * The stack will have the following structure:
- * - Isolate* isolate (address of the current isolate)
- * - direct_call (if 1, direct call from JavaScript code, if 0
- * call through the runtime system)
- * - stack_area_base (high end of the memory area to use as
- * backtracking stack)
- * - capture array size (may fit multiple sets of matches)
- * - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (address of end of string)
- * - start of input (address of first character in string)
- * - start index (character index of start)
- * - String* input_string (location of a handle containing the string)
- * --- frame alignment (if applicable) ---
- * - return address
- * ebp-> - old ebp
- * - backup of caller esi
- * - backup of caller edi
- * - backup of caller ebx
- * - success counter (only for global regexps to count matches).
- * - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
- * - register 0 ebp[-4] (only positions must be stored in the first
- * - register 1 ebp[-8] num_saved_registers_ registers)
- * - ...
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers starts out as garbage.
- *
- * The data up to the return address must be placed there by the calling
- * code, by calling the code entry as cast to a function with the signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * int* capture_output_array,
- * bool at_start,
- * byte* stack_area_base,
- * bool direct_call)
- */
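
A sketch of how a caller invokes the generated code through the signature documented above; this just spells the comment's parameter list out as a function-pointer type (the actual cast lives elsewhere in V8, and String, Address, and byte are the in-source typedefs):

typedef int (*RegExpMatcher)(String* input_string,
                             int start_index,
                             Address start,
                             Address end,
                             int* capture_output_array,
                             bool at_start,
                             byte* stack_area_base,
                             bool direct_call);
// int result = reinterpret_cast<RegExpMatcher>(entry_address)(
//     input, start_index, start, end, captures, at_start, stack_base, true);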
-
-#define __ ACCESS_MASM(masm_)
-
-RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- ASSERT_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code later.
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerIA32::~RegExpMacroAssemblerIA32() {
- delete masm_;
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerIA32::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ add(edi, Immediate(by * char_size()));
- }
-}
-
-
-void RegExpMacroAssemblerIA32::AdvanceRegister(int reg, int by) {
- ASSERT(reg >= 0);
- ASSERT(reg < num_registers_);
- if (by != 0) {
- __ add(register_location(reg), Immediate(by));
- }
-}
-
-
-void RegExpMacroAssemblerIA32::Backtrack() {
- CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
- Pop(ebx);
- __ add(ebx, Immediate(masm_->CodeObject()));
- __ jmp(ebx);
-}
-
-
-void RegExpMacroAssemblerIA32::Bind(Label* label) {
- __ bind(label);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacter(uint32_t c, Label* on_equal) {
- __ cmp(current_character(), c);
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterGT(uc16 limit, Label* on_greater) {
- __ cmp(current_character(), limit);
- BranchOrBacktrack(greater, on_greater);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, &not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
- BranchOrBacktrack(equal, on_at_start);
- __ bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
- BranchOrBacktrack(not_equal, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterLT(uc16 limit, Label* on_less) {
- __ cmp(current_character(), limit);
- BranchOrBacktrack(less, on_less);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
-#ifdef DEBUG
- // If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ASCII character.
- if (mode_ == ASCII) {
- ASSERT(String::IsOneByte(str.start(), str.length()));
- }
-#endif
- int byte_length = str.length() * char_size();
- int byte_offset = cp_offset * char_size();
- if (check_end_of_string) {
- // Check that there are at least str.length() characters left in the input.
- __ cmp(edi, Immediate(-(byte_offset + byte_length)));
- BranchOrBacktrack(greater, on_failure);
- }
-
- if (on_failure == NULL) {
- // Instead of inlining a backtrack, (re)use the global backtrack target.
- on_failure = &backtrack_label_;
- }
-
- // Do one character test first to minimize loading for the case that
- // we don't match at all (loading more than one character introduces the
- // chance of unaligned reads and reads across cache-line boundaries).
- // If the first character matches, expect a larger chance of matching the
- // string, and start loading more characters at a time.
- if (mode_ == ASCII) {
- __ cmpb(Operand(esi, edi, times_1, byte_offset),
- static_cast<int8_t>(str[0]));
- } else {
- // Don't use 16-bit immediate. The size changing prefix throws off
- // pre-decoding.
- __ movzx_w(eax,
- Operand(esi, edi, times_1, byte_offset));
- __ cmp(eax, static_cast<int32_t>(str[0]));
- }
- BranchOrBacktrack(not_equal, on_failure);
-
- __ lea(ebx, Operand(esi, edi, times_1, 0));
- for (int i = 1, n = str.length(); i < n;) {
- if (mode_ == ASCII) {
- if (i <= n - 4) {
- int combined_chars =
- (static_cast<uint32_t>(str[i + 0]) << 0) |
- (static_cast<uint32_t>(str[i + 1]) << 8) |
- (static_cast<uint32_t>(str[i + 2]) << 16) |
- (static_cast<uint32_t>(str[i + 3]) << 24);
- __ cmp(Operand(ebx, byte_offset + i), Immediate(combined_chars));
- i += 4;
- } else {
- __ cmpb(Operand(ebx, byte_offset + i),
- static_cast<int8_t>(str[i]));
- i += 1;
- }
- } else {
- ASSERT(mode_ == UC16);
- if (i <= n - 2) {
- __ cmp(Operand(ebx, byte_offset + i * sizeof(uc16)),
- Immediate(*reinterpret_cast<const int*>(&str[i])));
- i += 2;
- } else {
- // Avoid a 16-bit immediate operation. It uses the length-changing
- // 0x66 prefix which causes pre-decoder misprediction and pipeline
- // stalls. See
- // "Intel(R) 64 and IA-32 Architectures Optimization Reference Manual"
- // (248966.pdf) section 3.4.2.3 "Length-Changing Prefixes (LCP)"
- __ movzx_w(eax,
- Operand(ebx, byte_offset + i * sizeof(uc16)));
- __ cmp(eax, static_cast<int32_t>(str[i]));
- i += 1;
- }
- }
- BranchOrBacktrack(not_equal, on_failure);
- }
-}
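
The combined_chars immediate above packs four one-byte characters little-endian so that a single 32-bit cmp tests all of them against memory at once. A standalone sketch of the packing:

#include <cassert>
#include <cstdint>

uint32_t CombineChars(const char* s) {
  return (static_cast<uint32_t>(static_cast<uint8_t>(s[0])) << 0) |
         (static_cast<uint32_t>(static_cast<uint8_t>(s[1])) << 8) |
         (static_cast<uint32_t>(static_cast<uint8_t>(s[2])) << 16) |
         (static_cast<uint32_t>(static_cast<uint8_t>(s[3])) << 24);
}

int main() {
  // On a little-endian target the packed immediate equals the raw memory word.
  assert(CombineChars("abcd") == 0x64636261u);
  return 0;
}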
-
-
-void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
- Label fallthrough;
- __ cmp(edi, Operand(backtrack_stackpointer(), 0));
- __ j(not_equal, &fallthrough);
- __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop.
- BranchOrBacktrack(no_condition, on_equal);
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- __ mov(edx, register_location(start_reg)); // Index of start of capture
- __ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
- __ sub(ebx, edx); // Length of capture.
-
- // The length of a capture should not be negative. This can only happen
- // if the end of the capture is unrecorded, or recorded at a point earlier
- // than the start of the capture.
- BranchOrBacktrack(less, on_no_match);
-
- // If length is zero, either the capture is empty or it is completely
- // uncaptured. In either case succeed immediately.
- __ j(equal, &fallthrough);
-
- // Check that there are sufficient characters left in the input.
- __ mov(eax, edi);
- __ add(eax, ebx);
- BranchOrBacktrack(greater, on_no_match);
-
- if (mode_ == ASCII) {
- Label success;
- Label fail;
- Label loop_increment;
- // Save register contents to make the registers available below.
- __ push(edi);
- __ push(backtrack_stackpointer());
- // After this, the eax, ecx, and edi registers are available.
-
- __ add(edx, esi); // Start of capture
- __ add(edi, esi); // Start of text to match against capture.
- __ add(ebx, edi); // End of text to match against capture.
-
- Label loop;
- __ bind(&loop);
- __ movzx_b(eax, Operand(edi, 0));
- __ cmpb_al(Operand(edx, 0));
- __ j(equal, &loop_increment);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- __ or_(eax, 0x20); // Convert match character to lower-case.
- __ lea(ecx, Operand(eax, -'a'));
- __ cmp(ecx, static_cast<int32_t>('z' - 'a')); // Is eax a lowercase letter?
-#ifndef ENABLE_LATIN_1
- __ j(above, &fail); // Weren't letters anyway.
-#else
- Label convert_capture;
- __ j(below_equal, &convert_capture); // In range 'a'-'z'.
- // Latin-1: Check for values in range [224,254] but not 247.
- __ sub(ecx, Immediate(224 - 'a'));
- __ cmp(ecx, Immediate(254 - 224));
- __ j(above, &fail); // Weren't Latin-1 letters.
- __ cmp(ecx, Immediate(247 - 224)); // Check for 247.
- __ j(equal, &fail);
- __ bind(&convert_capture);
-#endif
- // Also convert capture character.
- __ movzx_b(ecx, Operand(edx, 0));
- __ or_(ecx, 0x20);
-
- __ cmp(eax, ecx);
- __ j(not_equal, &fail);
-
- __ bind(&loop_increment);
- // Increment pointers into match and capture strings.
- __ add(edx, Immediate(1));
- __ add(edi, Immediate(1));
- // Compare to end of match, and loop if not done.
- __ cmp(edi, ebx);
- __ j(below, &loop);
- __ jmp(&success);
-
- __ bind(&fail);
- // Restore original values before failing.
- __ pop(backtrack_stackpointer());
- __ pop(edi);
- BranchOrBacktrack(no_condition, on_no_match);
-
- __ bind(&success);
- // Restore original value before continuing.
- __ pop(backtrack_stackpointer());
- // Drop original value of character position.
- __ add(esp, Immediate(kPointerSize));
- // Compute new value of character position after the matched part.
- __ sub(edi, esi);
- } else {
- ASSERT(mode_ == UC16);
- // Save registers before calling C function.
- __ push(esi);
- __ push(edi);
- __ push(backtrack_stackpointer());
- __ push(ebx);
-
- static const int argument_count = 4;
- __ PrepareCallCFunction(argument_count, ecx);
- // Put arguments into allocated stack area, last argument highest on stack.
- // Parameters are
- // Address byte_offset1 - Address captured substring's start.
- // Address byte_offset2 - Address of current character position.
- // size_t byte_length - length of capture in bytes(!)
- // Isolate* isolate
-
- // Set isolate.
- __ mov(Operand(esp, 3 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- // Set byte_length.
- __ mov(Operand(esp, 2 * kPointerSize), ebx);
- // Set byte_offset2.
- // Found by adding negative string-end offset of current position (edi)
- // to end of string.
- __ add(edi, esi);
- __ mov(Operand(esp, 1 * kPointerSize), edi);
- // Set byte_offset1.
- // Start of capture, where edx already holds string-end negative offset.
- __ add(edx, esi);
- __ mov(Operand(esp, 0 * kPointerSize), edx);
-
- {
- AllowExternalCallThatCantCauseGC scope(masm_);
- ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
- __ CallCFunction(compare, argument_count);
- }
- // Pop original values before reacting to the result value.
- __ pop(ebx);
- __ pop(backtrack_stackpointer());
- __ pop(edi);
- __ pop(esi);
-
- // Check if function returned non-zero for success or zero for failure.
- __ or_(eax, eax);
- BranchOrBacktrack(zero, on_no_match);
- // On success, increment position by length of capture.
- __ add(edi, ebx);
- }
- __ bind(&fallthrough);
-}
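-
-// A worked example of the OR-0x20 fold in the ASCII branch above: 'A' is
-// 0x41 and 'a' is 0x61, and 0x41 | 0x20 == 0x61, so ORing both characters
-// with 0x20 makes letters compare equal regardless of case. Non-letters
-// either survive the OR unchanged (e.g. '1') or land outside 'a'..'z'
-// (e.g. '[' (0x5b) becomes '{' (0x7b)), so the unsigned
-// (c - 'a') <= ('z' - 'a') check that follows the OR rejects them.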
-
-
-void RegExpMacroAssemblerIA32::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- Label success;
- Label fail;
-
- // Find length of back-referenced capture.
- __ mov(edx, register_location(start_reg));
- __ mov(eax, register_location(start_reg + 1));
- __ sub(eax, edx); // Length to check.
- // Fail on partial or illegal capture (start of capture after end of capture).
- BranchOrBacktrack(less, on_no_match);
- // Succeed on empty capture (including no capture)
- __ j(equal, &fallthrough);
-
- // Check that there are sufficient characters left in the input.
- __ mov(ebx, edi);
- __ add(ebx, eax);
- BranchOrBacktrack(greater, on_no_match);
-
- // Save register to make it available below.
- __ push(backtrack_stackpointer());
-
- // Compute pointers to match string and capture string
- __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
- __ add(edx, esi); // Start of capture.
- __ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match.
-
- Label loop;
- __ bind(&loop);
- if (mode_ == ASCII) {
- __ movzx_b(eax, Operand(edx, 0));
- __ cmpb_al(Operand(ebx, 0));
- } else {
- ASSERT(mode_ == UC16);
- __ movzx_w(eax, Operand(edx, 0));
- __ cmpw_ax(Operand(ebx, 0));
- }
- __ j(not_equal, &fail);
- // Increment pointers into capture and match string.
- __ add(edx, Immediate(char_size()));
- __ add(ebx, Immediate(char_size()));
- // Check if we have reached end of match area.
- __ cmp(ebx, ecx);
- __ j(below, &loop);
- __ jmp(&success);
-
- __ bind(&fail);
- // Restore backtrack stackpointer.
- __ pop(backtrack_stackpointer());
- BranchOrBacktrack(no_condition, on_no_match);
-
- __ bind(&success);
- // Move current character position to position after match.
- __ mov(edi, ecx);
- __ sub(edi, esi);
- // Restore backtrack stackpointer.
- __ pop(backtrack_stackpointer());
-
- __ bind(&fallthrough);
-}
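-
-// A note on the position representation used throughout this file: esi
-// holds the address of the end of the input and edi holds a negative
-// byte offset from that end, so the current character lives at esi + edi.
-// For a 10-byte ASCII input with the cursor at index 3, edi is 3 - 10,
-// i.e. -7, and esi + edi points at byte 3; edi reaching zero means the
-// cursor has hit the end of the input.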
-
-
-void RegExpMacroAssemblerIA32::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
- __ cmp(current_character(), c);
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- if (c == 0) {
- __ test(current_character(), Immediate(mask));
- } else {
- __ mov(eax, mask);
- __ and_(eax, current_character());
- __ cmp(eax, c);
- }
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
- if (c == 0) {
- __ test(current_character(), Immediate(mask));
- } else {
- __ mov(eax, mask);
- __ and_(eax, current_character());
- __ cmp(eax, c);
- }
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ lea(eax, Operand(current_character(), -minus));
- if (c == 0) {
- __ test(eax, Immediate(mask));
- } else {
- __ and_(eax, mask);
- __ cmp(eax, c);
- }
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- __ lea(eax, Operand(current_character(), -from));
- __ cmp(eax, to - from);
- BranchOrBacktrack(below_equal, on_in_range);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- __ lea(eax, Operand(current_character(), -from));
- __ cmp(eax, to - from);
- BranchOrBacktrack(above, on_not_in_range);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckBitInTable(
- Handle<ByteArray> table,
- Label* on_bit_set) {
- __ mov(eax, Immediate(table));
- Register index = current_character();
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
- __ mov(ebx, kTableSize - 1);
- __ and_(ebx, current_character());
- index = ebx;
- }
- __ cmpb(FieldOperand(eax, index, times_1, ByteArray::kHeaderSize), 0);
- BranchOrBacktrack(not_equal, on_bit_set);
-}
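-
-// The ByteArray used above is a one-byte-per-entry membership table:
-// entry (character & kTableMask) is non-zero exactly when the class
-// contains that (masked) character, so a single cmpb against zero
-// decides the branch.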
-
-
-bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check.
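- // For example, the 'd' case below computes c - '0' and compares it
- // against '9' - '0' == 9 as an unsigned value: any c below '0' wraps
- // around to a huge unsigned number, so the single 'above' branch
- // rejects both c < '0' and c > '9'.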
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- Label success;
- __ cmp(current_character(), ' ');
- __ j(equal, &success);
- // Check range 0x09..0x0d
- __ lea(eax, Operand(current_character(), -'\t'));
- __ cmp(eax, '\r' - '\t');
- BranchOrBacktrack(above, on_no_match);
- __ bind(&success);
- return true;
- }
- return false;
- case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmp(current_character(), ' ');
- BranchOrBacktrack(equal, on_no_match);
- __ lea(eax, Operand(current_character(), -'\t'));
- __ cmp(eax, '\r' - '\t');
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- }
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9')
- __ lea(eax, Operand(current_character(), -'0'));
- __ cmp(eax, '9' - '0');
- BranchOrBacktrack(above, on_no_match);
- return true;
- case 'D':
- // Match non ASCII-digits
- __ lea(eax, Operand(current_character(), -'0'));
- __ cmp(eax, '9' - '0');
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ mov(eax, current_character());
- __ xor_(eax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
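- // (The XOR with 0x01 maps '\n' (0x0a) to 0x0b and '\r' (0x0d) to 0x0c,
- // making the two terminators adjacent so one unsigned range check
- // covers both; '\v' (0x0b) and '\f' (0x0c) map to 0x0a and 0x0d,
- // outside the range, so they are not misclassified.)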
- __ sub(eax, Immediate(0x0b));
- __ cmp(eax, 0x0c - 0x0b);
- BranchOrBacktrack(below_equal, on_no_match);
- if (mode_ == UC16) {
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(eax, Immediate(0x2028 - 0x0b));
- __ cmp(eax, 0x2029 - 0x2028);
- BranchOrBacktrack(below_equal, on_no_match);
- }
- return true;
- }
- case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(current_character(), Immediate('z'));
- BranchOrBacktrack(above, on_no_match);
- }
- ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
- __ test_b(current_character(),
- Operand::StaticArray(current_character(), times_1, word_map));
- BranchOrBacktrack(zero, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(current_character(), Immediate('z'));
- __ j(above, &done);
- }
- ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
- __ test_b(current_character(),
- Operand::StaticArray(current_character(), times_1, word_map));
- BranchOrBacktrack(not_zero, on_no_match);
- if (mode_ != ASCII) {
- __ bind(&done);
- }
- return true;
- }
- // Non-standard classes (with no syntactic shorthand) used internally.
- case '*':
- // Match any character.
- return true;
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
- // The opposite of '.'.
- __ mov(eax, current_character());
- __ xor_(eax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(eax, Immediate(0x0b));
- __ cmp(eax, 0x0c - 0x0b);
- if (mode_ == ASCII) {
- BranchOrBacktrack(above, on_no_match);
- } else {
- Label done;
- BranchOrBacktrack(below_equal, &done);
- ASSERT_EQ(UC16, mode_);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(eax, Immediate(0x2028 - 0x0b));
- __ cmp(eax, 1);
- BranchOrBacktrack(above, on_no_match);
- __ bind(&done);
- }
- return true;
- }
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
-
-
-void RegExpMacroAssemblerIA32::Fail() {
- STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
- if (!global()) {
- __ Set(eax, Immediate(FAILURE));
- }
- __ jmp(&exit_label_);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
- Label return_eax;
- // Finalize code - write the entry point code now that we know how many
- // registers we need.
-
- // Entry code:
- __ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // code is generated.
- FrameScope scope(masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
- __ push(ebp);
- __ mov(ebp, esp);
- // Save callee-save registers. Order here should correspond to order of
- // kBackup_ebx etc.
- __ push(esi);
- __ push(edi);
- __ push(ebx); // Callee-save on MacOS.
- __ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
-
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ mov(ecx, esp);
- __ sub(ecx, Operand::StaticVariable(stack_limit));
- // Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmp(ecx, num_registers_ * kPointerSize);
- __ j(above_equal, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ mov(eax, EXCEPTION);
- __ jmp(&return_eax);
-
- __ bind(&stack_limit_hit);
- CallCheckStackGuardState(ebx);
- __ or_(eax, eax);
- // If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &return_eax);
-
- __ bind(&stack_ok);
- // Load start index for later use.
- __ mov(ebx, Operand(ebp, kStartIndex));
-
- // Allocate space on stack for registers.
- __ sub(esp, Immediate(num_registers_ * kPointerSize));
- // Load string length.
- __ mov(esi, Operand(ebp, kInputEnd));
- // Load input position.
- __ mov(edi, Operand(ebp, kInputStart));
- // Set up edi to be negative offset from string end.
- __ sub(edi, esi);
-
- // Set eax to address of char before start of the string.
- // (effectively string position -1).
- __ neg(ebx);
- if (mode_ == UC16) {
- __ lea(eax, Operand(edi, ebx, times_2, -char_size()));
- } else {
- __ lea(eax, Operand(edi, ebx, times_1, -char_size()));
- }
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ mov(Operand(ebp, kInputStartMinusOne), eax);
-
-#ifdef WIN32
- // Ensure that we write to each stack page, in order. Skipping a page
- // on Windows can cause segmentation faults. Assuming page size is 4k.
- const int kPageSize = 4096;
- const int kRegistersPerPage = kPageSize / kPointerSize;
- for (int i = num_saved_registers_ + kRegistersPerPage - 1;
- i < num_registers_;
- i += kRegistersPerPage) {
- __ mov(register_location(i), eax); // One write every page.
- }
-#endif // WIN32
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- __ j(not_equal, &load_char_start_regexp, Label::kNear);
- __ mov(current_character(), '\n');
- __ jmp(&start_regexp, Label::kNear);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
-
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
- // Fill saved registers with initial value = start offset - 1
- // Fill in stack push order, to avoid accessing across an unwritten
- // page (a problem on Windows).
- if (num_saved_registers_ > 8) {
- __ mov(ecx, kRegisterZero);
- Label init_loop;
- __ bind(&init_loop);
- __ mov(Operand(ebp, ecx, times_1, 0), eax);
- __ sub(ecx, Immediate(kPointerSize));
- __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
- __ j(greater, &init_loop);
- } else { // Unroll the loop.
- for (int i = 0; i < num_saved_registers_; i++) {
- __ mov(register_location(i), eax);
- }
- }
- }
-
- // Initialize backtrack stack pointer.
- __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
-
- __ jmp(&start_label_);
-
- // Exit code:
- if (success_label_.is_linked()) {
- // Save captures when successful.
- __ bind(&success_label_);
- if (num_saved_registers_ > 0) {
- // Copy captures to output.
- __ mov(ebx, Operand(ebp, kRegisterOutput));
- __ mov(ecx, Operand(ebp, kInputEnd));
- __ mov(edx, Operand(ebp, kStartIndex));
- __ sub(ecx, Operand(ebp, kInputStart));
- if (mode_ == UC16) {
- __ lea(ecx, Operand(ecx, edx, times_2, 0));
- } else {
- __ add(ecx, edx);
- }
- for (int i = 0; i < num_saved_registers_; i++) {
- __ mov(eax, register_location(i));
- if (i == 0 && global_with_zero_length_check()) {
- // Keep capture start in edx for the zero-length check later.
- __ mov(edx, eax);
- }
- // Convert to index from start of string, not end.
- __ add(eax, ecx);
- if (mode_ == UC16) {
- __ sar(eax, 1); // Convert byte index to character index.
- }
- __ mov(Operand(ebx, i * kPointerSize), eax);
- }
- }
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- // Increment success counter.
- __ inc(Operand(ebp, kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ mov(ecx, Operand(ebp, kNumOutputRegisters));
- __ sub(ecx, Immediate(num_saved_registers_));
- // Check whether we have enough room for another set of capture results.
- __ cmp(ecx, Immediate(num_saved_registers_));
- __ j(less, &exit_label_);
-
- __ mov(Operand(ebp, kNumOutputRegisters), ecx);
- // Advance the location for output.
- __ add(Operand(ebp, kRegisterOutput),
- Immediate(num_saved_registers_ * kPointerSize));
-
- // Prepare eax to initialize registers with its value in the next run.
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
-
- if (global_with_zero_length_check()) {
- // Special case for zero-length matches.
- // edx: capture start index
- __ cmp(edi, edx);
- // Not a zero-length match, restart.
- __ j(not_equal, &load_char_start_regexp);
- // edi (offset from the end) is zero if we already reached the end.
- __ test(edi, edi);
- __ j(zero, &exit_label_, Label::kNear);
- // Advance current position after a zero-length match.
- if (mode_ == UC16) {
- __ add(edi, Immediate(2));
- } else {
- __ inc(edi);
- }
- }
-
- __ jmp(&load_char_start_regexp);
- } else {
- __ mov(eax, Immediate(SUCCESS));
- }
- }
-
- __ bind(&exit_label_);
- if (global()) {
- // Return the number of successful captures.
- __ mov(eax, Operand(ebp, kSuccessfulCaptures));
- }
-
- __ bind(&return_eax);
- // Skip esp past regexp registers.
- __ lea(esp, Operand(ebp, kBackup_ebx));
- // Restore callee-save registers.
- __ pop(ebx);
- __ pop(edi);
- __ pop(esi);
- // Exit function frame, restore previous one.
- __ pop(ebp);
- __ ret(0);
-
- // Backtrack code (branch target for conditional backtracks).
- if (backtrack_label_.is_linked()) {
- __ bind(&backtrack_label_);
- Backtrack();
- }
-
- Label exit_with_exception;
-
- // Preempt-code
- if (check_preempt_label_.is_linked()) {
- SafeCallTarget(&check_preempt_label_);
-
- __ push(backtrack_stackpointer());
- __ push(edi);
-
- CallCheckStackGuardState(ebx);
- __ or_(eax, eax);
- // If returning non-zero, we should end execution with the given
- // result as return value.
- __ j(not_zero, &return_eax);
-
- __ pop(edi);
- __ pop(backtrack_stackpointer());
- // String might have moved: Reload esi from frame.
- __ mov(esi, Operand(ebp, kInputEnd));
- SafeReturn();
- }
-
- // Backtrack stack overflow code.
- if (stack_overflow_label_.is_linked()) {
- SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
-
- Label grow_failed;
- // Save registers before calling C function
- __ push(esi);
- __ push(edi);
-
- // Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, ebx);
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address()));
- __ lea(eax, Operand(ebp, kStackHighEnd));
- __ mov(Operand(esp, 1 * kPointerSize), eax);
- __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If it returned NULL, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- __ or_(eax, eax);
- __ j(equal, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ mov(backtrack_stackpointer(), eax);
- // Restore saved registers and continue.
- __ pop(edi);
- __ pop(esi);
- SafeReturn();
- }
-
- if (exit_with_exception.is_linked()) {
- // If any of the code above needed to exit with an exception.
- __ bind(&exit_with_exception);
- // Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ mov(eax, EXCEPTION);
- __ jmp(&return_eax);
- }
-
- CodeDesc code_desc;
- masm_->GetCode(&code_desc);
- Handle<Code> code =
- masm_->isolate()->factory()->NewCode(code_desc,
- Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
- PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
- return Handle<HeapObject>::cast(code);
-}
-
-
-void RegExpMacroAssemblerIA32::GoTo(Label* to) {
- BranchOrBacktrack(no_condition, to);
-}
-
-
-void RegExpMacroAssemblerIA32::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ cmp(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(greater_equal, if_ge);
-}
-
-
-void RegExpMacroAssemblerIA32::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- __ cmp(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(less, if_lt);
-}
-
-
-void RegExpMacroAssemblerIA32::IfRegisterEqPos(int reg,
- Label* if_eq) {
- __ cmp(edi, register_location(reg));
- BranchOrBacktrack(equal, if_eq);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerIA32::Implementation() {
- return kIA32Implementation;
-}
-
-
-void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
- ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerIA32::PopCurrentPosition() {
- Pop(edi);
-}
-
-
-void RegExpMacroAssemblerIA32::PopRegister(int register_index) {
- Pop(eax);
- __ mov(register_location(register_index), eax);
-}
-
-
-void RegExpMacroAssemblerIA32::PushBacktrack(Label* label) {
- Push(Immediate::CodeRelativeOffset(label));
- CheckStackLimit();
-}
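-
-// Backtrack targets are pushed as offsets relative to the start of the
-// code object (the CodeRelativeOffset above) rather than as absolute
-// addresses; SafeCall()/SafeReturn() use the same scheme for internal
-// return addresses. Keeping absolute code addresses off the stacks means
-// a GC that moves the code object cannot invalidate the saved entries.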
-
-
-void RegExpMacroAssemblerIA32::PushCurrentPosition() {
- Push(edi);
-}
-
-
-void RegExpMacroAssemblerIA32::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- __ mov(eax, register_location(register_index));
- Push(eax);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerIA32::ReadCurrentPositionFromRegister(int reg) {
- __ mov(edi, register_location(reg));
-}
-
-
-void RegExpMacroAssemblerIA32::ReadStackPointerFromRegister(int reg) {
- __ mov(backtrack_stackpointer(), register_location(reg));
- __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
-}
-
-void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by) {
- Label after_position;
- __ cmp(edi, -by * char_size());
- __ j(greater_equal, &after_position, Label::kNear);
- __ mov(edi, -by * char_size());
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&after_position);
-}
-
-void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ mov(register_location(register_index), Immediate(to));
-}
-
-
-bool RegExpMacroAssemblerIA32::Succeed() {
- __ jmp(&success_label_);
- return global();
-}
-
-
-void RegExpMacroAssemblerIA32::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- if (cp_offset == 0) {
- __ mov(register_location(reg), edi);
- } else {
- __ lea(eax, Operand(edi, cp_offset * char_size()));
- __ mov(register_location(reg), eax);
- }
-}
-
-
-void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
- for (int reg = reg_from; reg <= reg_to; reg++) {
- __ mov(register_location(reg), eax);
- }
-}
-
-
-void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
- __ mov(eax, backtrack_stackpointer());
- __ sub(eax, Operand(ebp, kStackHighEnd));
- __ mov(register_location(reg), eax);
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
- // RegExp code frame pointer.
- __ mov(Operand(esp, 2 * kPointerSize), ebp);
- // Code* of self.
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(masm_->CodeObject()));
- // Next address on the stack (will be address of return address).
- __ lea(eax, Operand(esp, -kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
- ExternalReference check_stack_guard =
- ExternalReference::re_check_stack_guard_state(masm_->isolate());
- __ CallCFunction(check_stack_guard, num_arguments);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
- if (isolate->stack_guard()->IsStackOverflow()) {
- isolate->StackOverflow();
- return EXCEPTION;
- }
-
- // If it is not a real stack overflow, the stack guard was used to interrupt
- // execution for another purpose.
-
- // If this is a direct call from JavaScript retry the RegExp forcing the call
- // through the runtime system. Currently the direct call cannot handle a GC.
- if (frame_entry<int>(re_frame, kDirectCall) == 1) {
- return RETRY;
- }
-
- // Prepare for possible GC.
- HandleScope handles(isolate);
- Handle<Code> code_handle(re_code);
-
- Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
-
- // Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
-
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
- re_code->instruction_start() + re_code->instruction_size());
-
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
-
- if (*code_handle != re_code) { // Return address no longer valid
- int delta = code_handle->address() - re_code->address();
- // Overwrite the return address on the stack.
- *return_address += delta;
- }
-
- if (result->IsException()) {
- return EXCEPTION;
- }
-
- Handle<String> subject_tmp = subject;
- int slice_offset = 0;
-
- // Extract the underlying string and the slice offset.
- if (StringShape(*subject_tmp).IsCons()) {
- subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
- } else if (StringShape(*subject_tmp).IsSliced()) {
- SlicedString* slice = SlicedString::cast(*subject_tmp);
- subject_tmp = Handle<String>(slice->parent());
- slice_offset = slice->offset();
- }
-
- // String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and a UC16 string, the specialized
- // code cannot be used, and we need to restart regexp matching from
- // scratch (including, potentially, compiling a new version of the code).
- return RETRY;
- }
-
- // Otherwise, the content of the string might have moved. It must still
- // be a sequential or external string with the same content.
- // Update the start and end pointers in the stack frame to the current
- // location (whether it has actually moved or not).
- ASSERT(StringShape(*subject_tmp).IsSequential() ||
- StringShape(*subject_tmp).IsExternal());
-
- // The original start address of the characters to match.
- const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
-
- // Find the current start address of the same character at the current string
- // position.
- int start_index = frame_entry<int>(re_frame, kStartIndex);
- const byte* new_address = StringCharacterPosition(*subject_tmp,
- start_index + slice_offset);
-
- if (start_address != new_address) {
- // If there is a difference, update the object pointer and start and end
- // addresses in the RegExp stack frame to match the new value.
- const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
- int byte_length = static_cast<int>(end_address - start_address);
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- frame_entry<const byte*>(re_frame, kInputStart) = new_address;
- frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
- } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
- // Subject string might have been a ConsString that underwent
- // short-circuiting during GC. That will not change start_address but
- // will change the pointer inside the subject handle.
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- }
-
- return 0;
-}
-
-
-Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- return Operand(ebp, kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- __ cmp(edi, -cp_offset * char_size());
- BranchOrBacktrack(greater_equal, on_outside_input);
-}
-
-
-void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
- Label* to) {
- if (condition < 0) { // No condition
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
- }
- if (to == NULL) {
- __ j(condition, &backtrack_label_);
- return;
- }
- __ j(condition, to);
-}
-
-
-void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
- Label return_to;
- __ push(Immediate::CodeRelativeOffset(&return_to));
- __ jmp(to);
- __ bind(&return_to);
-}
-
-
-void RegExpMacroAssemblerIA32::SafeReturn() {
- __ pop(ebx);
- __ add(ebx, Immediate(masm_->CodeObject()));
- __ jmp(ebx);
-}
-
-
-void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
- __ bind(name);
-}
-
-
-void RegExpMacroAssemblerIA32::Push(Register source) {
- ASSERT(!source.is(backtrack_stackpointer()));
- // Notice: This updates flags, unlike normal Push.
- __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
- __ mov(Operand(backtrack_stackpointer(), 0), source);
-}
-
-
-void RegExpMacroAssemblerIA32::Push(Immediate value) {
- // Notice: This updates flags, unlike normal Push.
- __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
- __ mov(Operand(backtrack_stackpointer(), 0), value);
-}
-
-
-void RegExpMacroAssemblerIA32::Pop(Register target) {
- ASSERT(!target.is(backtrack_stackpointer()));
- __ mov(target, Operand(backtrack_stackpointer(), 0));
- // Notice: This updates flags, unlike normal Pop.
- __ add(backtrack_stackpointer(), Immediate(kPointerSize));
-}
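-
-// The backtrack stack is a separate stack whose high end is passed in via
-// the kStackHighEnd parameter and which is addressed through ecx, so Push
-// and Pop are open-coded as sub/mov and mov/add pairs rather than using
-// the hardware push/pop instructions, which operate on esp.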
-
-
-void RegExpMacroAssemblerIA32::CheckPreemption() {
- // Check for preemption.
- Label no_preempt;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above, &no_preempt);
-
- SafeCall(&check_preempt_label_);
-
- __ bind(&no_preempt);
-}
-
-
-void RegExpMacroAssemblerIA32::CheckStackLimit() {
- Label no_stack_overflow;
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
- __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
- __ j(above, &no_stack_overflow);
-
- SafeCall(&stack_overflow_label_);
-
- __ bind(&no_stack_overflow);
-}
-
-
-void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- if (mode_ == ASCII) {
- if (characters == 4) {
- __ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
- } else if (characters == 2) {
- __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset));
- } else {
- ASSERT(characters == 1);
- __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset));
- }
- } else {
- ASSERT(mode_ == UC16);
- if (characters == 2) {
- __ mov(current_character(),
- Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
- } else {
- ASSERT(characters == 1);
- __ movzx_w(current_character(),
- Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
- }
- }
-}
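-
-// Loading several characters at once lets callers match short literal
-// runs with a single compare: in ASCII mode the 4-byte load above would
-// place "true" in the register as the little-endian value 0x65757274
-// ('t' 0x74, 'r' 0x72, 'u' 0x75, 'e' 0x65), testable with one 32-bit cmp.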
-
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h b/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h
deleted file mode 100644
index 7aea385..0000000
--- a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
-#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
-
-#include "ia32/assembler-ia32.h"
-#include "ia32/assembler-ia32-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerIA32(Mode mode, int registers_to_save, Zone* zone);
- virtual ~RegExpMacroAssemblerIA32();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
- private:
- // Offsets from ebp of function parameters and stored registers.
- static const int kFramePointer = 0;
- // Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kPointerSize;
- static const int kFrameAlign = kReturn_eip + kPointerSize;
- // Parameters.
- static const int kInputString = kFrameAlign;
- static const int kStartIndex = kInputString + kPointerSize;
- static const int kInputStart = kStartIndex + kPointerSize;
- static const int kInputEnd = kInputStart + kPointerSize;
- static const int kRegisterOutput = kInputEnd + kPointerSize;
- // For a global regular expression, we have room to store at least
- // one set of capture results. For a non-global regexp, this value
- // is ignored.
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
- // Below the frame pointer - local stack variables.
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kBackup_esi = kFramePointer - kPointerSize;
- static const int kBackup_edi = kBackup_esi - kPointerSize;
- static const int kBackup_ebx = kBackup_edi - kPointerSize;
- static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
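- // With kPointerSize == 4 on ia32, the locals above resolve to
- // kBackup_esi == -4, kBackup_edi == -8, kBackup_ebx == -12,
- // kSuccessfulCaptures == -16, kInputStartMinusOne == -20 and
- // kRegisterZero == -24, so regexp register i is addressed as
- // Operand(ebp, -24 - i * kPointerSize) by register_location().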
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState(Register scratch);
-
- // The ebp-relative location of a regexp register.
- Operand register_location(int register_index);
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return edx; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return ecx; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to);
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to);
- inline void SafeReturn();
- inline void SafeCallTarget(Label* name);
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer (ecx) by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pushes a value on the backtrack stack. Decrements the stack pointer (ecx)
- // by a word size and stores the value there.
- inline void Push(Immediate value);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // (ecx) and increments it by a word size.
- inline void Pop(Register target);
-
- MacroAssembler* masm_;
-
- // Which mode to generate code for (ASCII or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/simulator-ia32.cc b/src/3rdparty/v8/src/ia32/simulator-ia32.cc
deleted file mode 100644
index ab81693..0000000
--- a/src/3rdparty/v8/src/ia32/simulator-ia32.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Since there is no simulator for the ia32 architecture, this file is empty.
-
diff --git a/src/3rdparty/v8/src/ia32/simulator-ia32.h b/src/3rdparty/v8/src/ia32/simulator-ia32.h
deleted file mode 100644
index 478d4ce..0000000
--- a/src/3rdparty/v8/src/ia32/simulator-ia32.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_SIMULATOR_IA32_H_
-#define V8_IA32_SIMULATOR_IA32_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// Since there is no simulator for the ia32 architecture, the only thing we
-// can do is call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-
-typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address should
-// expect nine int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- (reinterpret_cast<TryCatch*>(try_catch_address))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on ia32 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- USE(isolate);
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch() { }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_IA32_SIMULATOR_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc b/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
deleted file mode 100644
index 34ce36d..0000000
--- a/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
+++ /dev/null
@@ -1,3833 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register name,
- Register receiver,
- // Number of the cache entry, pointer-size scaled.
- Register offset,
- Register extra) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
- Label miss;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
-
- if (extra.is_valid()) {
- // Get the code entry from the cache.
- __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
-
- __ bind(&miss);
- } else {
- // Save the offset on the stack.
- __ push(offset);
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Restore offset register.
- __ mov(offset, Operand(esp, 0));
-
- // Get the code entry from the cache.
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Restore offset and re-load code entry from cache.
- __ pop(offset);
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
- // Jump to the first instruction in the code stub.
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
-
- // Pop at miss.
- __ bind(&miss);
- __ pop(offset);
- }
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be an internalized string and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<String> name,
- Register r0,
- Register r1) {
- ASSERT(name->IsInternalizedString());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ test_b(FieldOperand(r0, Map::kBitFieldOffset),
- kInterceptorOrAccessCheckNeededMask);
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = r0;
- __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->hash_table_map()));
- __ j(not_equal, miss_label);
-
- Label done;
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- r1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Label miss;
-
- // Assert that code is valid. The multiplying code relies on the entry size
- // being 12.
- ASSERT(sizeof(Entry) == 12);
-
- // Assert the flags do not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Assert that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
- ASSERT(!extra.is(receiver));
- ASSERT(!extra.is(name));
- ASSERT(!extra.is(scratch));
-
- // Assert scratch and extra registers are valid, and extra2/3 are unused.
- ASSERT(!scratch.is(no_reg));
- ASSERT(extra2.is(no_reg));
- ASSERT(extra3.is(no_reg));
-
- Register offset = scratch;
- scratch = no_reg;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ mov(offset, FieldOperand(name, String::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
- // We mask out the last two bits because they are not part of the hash and
- // they are always 01 for maps. The same masking is applied in the two
- // 'and' instructions below.
- __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
- // ProbeTable expects the offset to be pointer scaled, which it is, because
- // the heap object tag size is 2 and the pointer size log 2 is also 2.
- ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
-
- // Probe the primary table.
- ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
-
- // Primary miss: Compute hash for secondary probe.
- __ mov(offset, FieldOperand(name, String::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
- __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
- __ sub(offset, name);
- __ add(offset, Immediate(flags));
- __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
-
- // Probe the secondary table.
- ProbeTable(
- isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
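-
-// The probe hash mixes the name's hash field, the receiver's map pointer
-// and the code flags, masked to the table size. On a primary miss the
-// hash is re-derived with an extra subtraction of the name and addition
-// of the flags so that the secondary probe lands in a different slot of
-// the secondary table.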
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- __ LoadGlobalFunction(index, prototype);
- __ LoadGlobalFunctionInitialMap(prototype, prototype);
- // Load the prototype from the initial map.
- __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss) {
- // Check we're still in the same context.
- __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
- masm->isolate()->global_object());
- __ j(not_equal, miss);
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(masm->isolate()->native_context()->get(index)));
- // Load its initial map. The global functions all have initial maps.
- __ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
- // Load the prototype from the initial map.
- __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss_label);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, miss_label);
-
- // Load length directly from the JS array.
- __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
- __ ret(0);
-}
-
-
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ test(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss,
- bool support_wrappers) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
-
- // Load length from the string and convert to a smi.
- __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(eax, scratch1);
- __ ret(0);
-}
-
-
- // Load a fast property out of a holder object (src). In-object properties
- // are loaded directly; otherwise the property is loaded from the
- // properties fixed array.
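- // E.g. an in-object property with translated index n is read straight
- // from the object at byte offset n * kPointerSize, while an
- // out-of-object index n is read from element n of the properties array.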
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- Handle<JSObject> holder,
- PropertyIndex index) {
- DoGenerateFastPropertyLoad(
- masm, dst, src, index.is_inobject(holder), index.translate(holder));
-}
-
-
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
- int offset = index * kPointerSize;
- if (!inobject) {
- // Calculate the offset into the properties array.
- offset = offset + FixedArray::kHeaderSize;
- __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- src = dst;
- }
- __ mov(dst, FieldOperand(src, offset));
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
- Register scratch = name;
- __ mov(scratch, Immediate(interceptor));
- __ push(scratch);
- __ push(receiver);
- __ push(holder);
- __ push(FieldOperand(scratch, InterceptorInfo::kDataOffset));
- __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
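- // Six words in total, matching the argument count the callers below pass
- // to CallExternalReference.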
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate()),
- 6);
-}
-
-
-// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 4;
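- // The four words are the object that passed the type check, the API
- // function, the call data, and the isolate (see the state comment in
- // GenerateFastApiCall below).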
-
-
- // Reserves space for the extra arguments to the API function in the
- // caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
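- // Until then the reserved slots hold smi zeros, which are GC-safe
- // placeholders.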
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : last argument in the internal frame of the caller
- // -----------------------------------
- __ pop(scratch);
- for (int i = 0; i < kFastApiCallArguments; i++) {
- __ push(Immediate(Smi::FromInt(0)));
- }
- __ push(scratch);
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address.
- // -- esp[4] : last fast api call extra argument.
- // -- ...
- // -- esp[kFastApiCallArguments * 4] : first fast api call extra argument.
- // -- esp[kFastApiCallArguments * 4 + 4] : last argument in the internal
- // frame.
- // -----------------------------------
- __ pop(scratch);
- __ add(esp, Immediate(kPointerSize * kFastApiCallArguments));
- __ push(scratch);
-}
-
-
-// Generates call to API function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- esp[8] : api function
- // (first fast api call extra argument)
- // -- esp[12] : api call data
- // -- esp[16] : isolate
- // -- esp[20] : last argument
- // -- ...
- // -- esp[(argc + 4) * 4] : first argument
- // -- esp[(argc + 5) * 4] : receiver
- // -----------------------------------
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(edi, function);
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Pass the additional arguments.
- __ mov(Operand(esp, 2 * kPointerSize), edi);
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ mov(ecx, api_call_info);
- __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
- } else {
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
- }
- __ mov(Operand(esp, 4 * kPointerSize),
- Immediate(reinterpret_cast<int>(masm->isolate())));
-
- // Prepare arguments.
- __ lea(eax, Operand(esp, 4 * kPointerSize));
-
- const int kApiArgc = 1; // API function gets reference to the v8::Arguments.
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
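- // (One word each for the implicit_args_, values_, length_ and
- // is_construct_call_ fields, filled in below.)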
-
- __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
-
- __ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_.
- __ add(eax, Immediate(argc * kPointerSize));
- __ mov(ApiParameterOperand(2), eax); // v8::Arguments::values_.
- __ Set(ApiParameterOperand(3), Immediate(argc)); // v8::Arguments::length_.
- // v8::Arguments::is_construct_call_.
- __ Set(ApiParameterOperand(4), Immediate(0));
-
- // v8::InvocationCallback's argument.
- __ lea(eax, ApiParameterOperand(1));
- __ mov(ApiParameterOperand(0), eax);
-
- // Function address is a foreign pointer outside V8's heap.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ CallApiFunctionAndReturn(function_address,
- argc + kFastApiCallArguments + 1);
-}
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- Code::ExtraICState extra_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_state_(extra_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
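- // In short, a fast API call is possible when the expected receiver type
- // is found either between the receiver and the interceptor's holder
- // (depth1) or between the interceptor's holder and the constant
- // function's holder (depth2).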
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->call_const_interceptor(), 1);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder' for the
- // API call (the object that satisfies the signature's instanceof
- // check). It's safe to omit it here: if present, it was already
- // fetched by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
- } else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
- }
-
- // Deferred code for the fast API call case: clean up preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm, scratch1);
- __ jmp(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm, scratch1);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- 6);
-
- // Restore the name_ register.
- __ pop(name_);
-
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- // Leave the internal frame.
- }
-
- __ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel());
- __ j(not_equal, interceptor_succeeded);
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- Code::ExtraICState extra_state_;
-};
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
- Handle<Code> code = (kind == Code::STORE_IC)
- ? masm->isolate()->builtins()->StoreIC_Miss()
- : masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
- Handle<Code> code =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- LookupResult lookup(masm->isolate());
- object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
- // In sloppy mode, we could just return the value and be done. However, we
- // might be in strict mode, where we have to throw. Since we cannot tell,
- // go into the slow case unconditionally.
- __ jmp(miss_label);
- return;
- }
-
- // Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, mode);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
- // Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- if (lookup.IsFound()) {
- holder = lookup.holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- // We need an extra register, so preserve name_reg on the stack and let
- // CheckPrototypes below clobber it.
- __ push(name_reg);
- Label miss_pop, done_check;
- CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, &miss_pop);
- __ jmp(&done_check);
- __ bind(&miss_pop);
- __ pop(name_reg);
- __ jmp(miss_label);
- __ bind(&done_check);
- __ pop(name_reg);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ pop(scratch1); // Return address.
- __ push(receiver_reg);
- __ push(Immediate(transition));
- __ push(eax);
- __ push(scratch1);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3,
- 1);
- return;
- }
-
- if (!transition.is_null()) {
- // Update the map of the object.
- __ mov(scratch1, Immediate(transition));
- __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
-
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because neither the
- // size of the object nor the number of in-object properties is going to
- // change.
- index -= object->map()->inobject_properties();
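- // E.g. with 4 in-object properties, field index 2 becomes -2 (stored in
- // the object itself) while field index 5 becomes 1 (slot 1 of the
- // properties array).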
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ mov(FieldOperand(receiver_reg, offset), eax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, eax);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kDontSaveFPRegs);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array (optimistically).
- __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ mov(FieldOperand(scratch1, offset), eax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, eax);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kDontSaveFPRegs);
- }
-
- // Return the value (register eax).
- __ ret(0);
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
- if (Serializer::enabled()) {
- __ mov(scratch, Immediate(cell));
- __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Immediate(the_hole));
- } else {
- __ cmp(Operand::Cell(cell), Immediate(the_hole));
- }
- __ j(not_equal, miss);
-}
-
-
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-void StubCompiler::GenerateTailCall(Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<String> name,
- int save_at_depth,
- Label* miss,
- PrototypeCheckType check) {
- Handle<JSObject> first = object;
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg.
- Register reg = object_reg;
- Handle<JSObject> current = object;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
- }
-
- // Traverse the prototype chain and check the maps in the prototype chain for
- // fast and global objects or do negative lookup for normal objects.
- while (!current.is_identical_to(holder)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsInternalizedString()) {
- name = factory()->InternalizeString(name);
- }
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- bool in_new_space = heap()->InNewSpace(*prototype);
- Handle<Map> current_map(current->map());
- if (in_new_space) {
- // Save the map in scratch1 for later.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
- }
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (in_new_space) {
- // The prototype is in new space; we cannot store a reference to it
- // in the code. Load it from the map.
- __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // The prototype is in old space; load it directly.
- __ mov(reg, prototype);
- }
- }
-
- if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
- ASSERT(current.is_identical_to(holder));
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()),
- miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
- }
-
- // Perform security check for access to the global object.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
- if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
- Label* miss) {
- if (!miss->is_unused()) {
- __ jmp(success);
- __ bind(miss);
- GenerateLoadMiss(masm(), kind());
- }
-}
-
-
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* success,
- Handle<ExecutableAccessorInfo> callback) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- ASSERT(!reg.is(scratch2()));
- ASSERT(!reg.is(scratch3()));
- Register dictionary = scratch1();
- bool must_preserve_dictionary_reg = reg.is(dictionary);
-
- // Load the properties dictionary.
- if (must_preserve_dictionary_reg) {
- __ push(dictionary);
- }
- __ mov(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done, pop_and_miss;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- &pop_and_miss,
- &probe_done,
- dictionary,
- this->name(),
- scratch2(),
- scratch3());
- __ bind(&pop_and_miss);
- if (must_preserve_dictionary_reg) {
- __ pop(dictionary);
- }
- __ jmp(&miss);
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch2 contains the
- // index into the dictionary. Check that the value is the callback.
- Register index = scratch2();
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(scratch3(),
- Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
- if (must_preserve_dictionary_reg) {
- __ pop(dictionary);
- }
- __ cmp(scratch3(), callback);
- __ j(not_equal, &miss);
- }
-
- HandlerFrontendFooter(success, &miss);
- return reg;
-}
-
-
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Label* success,
- Handle<GlobalObject> global) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (!global.is_null()) {
- GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
- }
-
- if (!last->HasFastProperties()) {
- __ mov(scratch2(), FieldOperand(reg, HeapObject::kMapOffset));
- __ mov(scratch2(), FieldOperand(scratch2(), Map::kPrototypeOffset));
- __ cmp(scratch2(), isolate()->factory()->null_value());
- __ j(not_equal, &miss);
- }
-
- HandlerFrontendFooter(success, &miss);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex index) {
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
- __ ret(0);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadCallback(
- Register reg,
- Handle<ExecutableAccessorInfo> callback) {
- // Insert additional parameters into the stack frame above return address.
- ASSERT(!scratch3().is(reg));
- __ pop(scratch3()); // Get return address to place it below.
-
- __ push(receiver()); // receiver
- __ mov(scratch2(), esp);
- ASSERT(!scratch2().is(reg));
- __ push(reg); // holder
- // Push data from ExecutableAccessorInfo.
- if (isolate()->heap()->InNewSpace(callback->data())) {
- __ mov(scratch1(), Immediate(callback));
- __ push(FieldOperand(scratch1(), ExecutableAccessorInfo::kDataOffset));
- } else {
- __ push(Immediate(Handle<Object>(callback->data(), isolate())));
- }
- __ push(Immediate(reinterpret_cast<int>(isolate())));
-
- // Save a pointer to where we pushed the arguments pointer. This will be
- // passed as the const ExecutableAccessorInfo& to the C++ callback.
- __ push(scratch2());
-
- __ push(name()); // name
- __ mov(ebx, esp); // esp points to the reference to the name (a handle).
-
- __ push(scratch3()); // Restore return address.
-
- // The stack space covers a 4-element array for v8::Arguments::values_, a
- // handle for the name, and a pointer to the values (treated as a smi by
- // the GC).
- const int kStackSpace = 6;
- const int kApiArgc = 2;
-
- __ PrepareCallApiFunction(kApiArgc);
- __ mov(ApiParameterOperand(0), ebx); // name.
- __ add(ebx, Immediate(kPointerSize));
- __ mov(ApiParameterOperand(1), ebx); // arguments pointer.
-
- // Emitting a stub call may try to allocate (if the code is not
- // already generated). Do not allow the assembler to perform a
- // garbage collection but instead return the allocation failure
- // object.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- __ CallApiFunctionAndReturn(getter_address, kStackSpace);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
- // Return the constant value.
- __ LoadHeapObject(eax, value);
- __ ret(0);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
- Register holder_reg,
- Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name) {
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // So far the most popular follow-ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only those; other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- ExecutableAccessorInfo* callback =
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
- compile_followup_inline = callback->getter() != NULL &&
- callback->IsCompatibleReceiver(*object);
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++
- // code; the FIELD case might cause a miss during the prototype check.
- bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perform_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
- if (must_preserve_receiver_reg) {
- __ push(receiver());
- }
- __ push(holder_reg);
- __ push(this->name());
-
- // Invoke an interceptor. Note: the map checks from the receiver to the
- // interceptor's holder have already been compiled (see the callers of
- // this method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ cmp(eax, factory()->no_interceptor_result_sentinel());
- __ j(equal, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ ret(0);
-
- // Clobber registers when generating debug-code to provoke errors.
- __ bind(&interceptor_failed);
- if (FLAG_debug_code) {
- __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
- }
-
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
-
- // Leave the internal frame.
- }
-
- GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- __ pop(scratch2()); // save old return address
- PushInterceptorArguments(masm(), receiver(), holder_reg,
- this->name(), interceptor_holder);
- __ push(scratch2()); // restore old return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
- isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
-}
-
-
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(edx, miss);
- CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- if (Serializer::enabled()) {
- __ mov(edi, Immediate(cell));
- __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
- } else {
- __ mov(edi, Operand::Cell(cell));
- }
-
- // Check that the cell contains the same function.
- if (isolate()->heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(edi, miss);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
- Immediate(Handle<SharedFunctionInfo>(function->shared())));
- } else {
- __ cmp(edi, Immediate(function));
- }
- __ j(not_equal, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
- name, &miss);
-
- GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
-
- // Check that the function really is a function.
- __ JumpIfSmi(edi, &miss);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
-
- if (argc == 0) {
- // No-op: just return the length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- __ ret((argc + 1) * kPointerSize);
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- // Get the elements array of the object.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &check_double);
-
- // Get the array's length into eax and calculate new length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(eax, Immediate(Smi::FromInt(argc)));
-
- // Get the elements' length into ecx.
- __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(eax, ecx);
- __ j(greater, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ mov(ecx, Operand(esp, argc * kPointerSize));
- __ JumpIfNotSmi(ecx, &with_write_barrier);
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- // Store the value.
- __ mov(FieldOperand(edi,
- eax,
- times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize),
- ecx);
-
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&check_double);
-
- // Check that the elements are in double mode.
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(factory()->fixed_double_array_map()));
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into eax and calculate new length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(eax, Immediate(Smi::FromInt(argc)));
-
- // Get the elements' length into ecx.
- __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(eax, ecx);
- __ j(greater, &call_builtin);
-
- __ mov(ecx, Operand(esp, argc * kPointerSize));
- __ StoreNumberToDoubleElements(
- ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&with_write_barrier);
-
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object, otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(ebx, &call_builtin);
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(factory()->heap_number_map()));
- __ j(equal, &call_builtin);
- // edi: elements array
- // edx: receiver
- // ebx: map
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- ebx,
- edi,
- &try_holey_map);
-
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- // Restore edi.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- ebx,
- edi,
- &call_builtin);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- // Restore edi.
- __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(ebx, &call_builtin);
- }
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- // Store the value.
- __ lea(edx, FieldOperand(edi,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ mov(Operand(edx, 0), ecx);
-
- __ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&attempt_to_grow_elements);
- if (!FLAG_inline_new) {
- __ jmp(&call_builtin);
- }
-
- __ mov(ebx, Operand(esp, argc * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(ebx, &no_fast_elements_check);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(ecx, &call_builtin, Label::kFar);
- __ bind(&no_fast_elements_check);
-
- // We could be lucky and the elements array could be at the top of
- // new-space. In this case we can just grow it in place by moving the
- // allocation pointer up.
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
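- // One of the four new slots receives the pushed element; the remaining
- // three are filled with holes and become the array's slack.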
- // Load top.
- __ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
-
- // Check if it's the end of elements.
- __ lea(edx, FieldOperand(edi,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmp(edx, ecx);
- __ j(not_equal, &call_builtin);
- __ add(ecx, Immediate(kAllocationDelta * kPointerSize));
- __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
- __ j(above, &call_builtin);
-
- // We fit and could grow elements.
- __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
-
- // Push the argument...
- __ mov(Operand(edx, 0), ebx);
- // ... and fill the rest with holes.
- for (int i = 1; i < kAllocationDelta; i++) {
- __ mov(Operand(edx, i * kPointerSize),
- Immediate(factory()->the_hole_value()));
- }
-
- // We know the elements array is in new space so we don't need the
- // remembered set, but we just pushed a value onto it, so we may have to
- // tell the incremental marker to rescan the object that we just grew. We
- // don't need to worry about the holes because they are in old space and
- // already marked black.
- __ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
- // Restore the receiver to edx, as the finish sequence assumes it's there.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Increment element's and array's sizes.
- __ add(FieldOperand(edi, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(kAllocationDelta)));
-
- // NOTE: This only happens in new-space, where we don't care about the
- // black-byte-count on pages. Otherwise we would also have to update
- // that count when the object is black.
-
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPush, isolate()),
- argc + 1,
- 1);
- }
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- Label miss, return_undefined, call_builtin;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
-
- // Get the elements array of the object.
- __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into ecx and calculate new length.
- __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
- __ sub(ecx, Immediate(Smi::FromInt(1)));
- __ j(negative, &return_undefined);
-
- // Get the last element.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(eax, FieldOperand(ebx,
- ecx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(eax, Immediate(factory()->the_hole_value()));
- __ j(equal, &call_builtin);
-
- // Set the array's length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), ecx);
-
- // Fill with the hole.
- __ mov(FieldOperand(ebx,
- ecx, times_half_pointer_size,
- FixedArray::kHeaderSize),
- Immediate(factory()->the_hole_value()));
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&return_undefined);
- __ mov(eax, Immediate(factory()->undefined_value()));
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()),
- argc + 1,
- 1);
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
-
- Register receiver = ebx;
- Register index = edi;
- Register result = eax;
- __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
- } else {
- __ Set(index, Immediate(factory()->undefined_value()));
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(factory()->nan_value()));
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in ecx.
- __ Set(ecx, Immediate(name));
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) {
- return Handle<Code>::null();
- }
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
-
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
-
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- eax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
-
- Register receiver = eax;
- Register index = edi;
- Register scratch = edx;
- Register result = eax;
- __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
- } else {
- __ Set(index, Immediate(factory()->undefined_value()));
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ Set(eax, Immediate(factory()->empty_string()));
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in ecx.
- __ Set(ecx, Immediate(name));
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : function name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = ebx;
- __ mov(code, Operand(esp, 1 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ and_(code, Immediate(Smi::FromInt(0xffff)));
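- // (Smi::FromInt(0xffff) keeps the tag bit clear, so the result is still
- // a valid smi holding the low 16 bits of the code.)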
-
- StringCharFromCodeGenerator generator(code, eax);
- generator.GenerateFast(masm());
- __ ret(2 * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- if (!CpuFeatures::IsSupported(SSE2)) {
- return Handle<Code>::null();
- }
-
- CpuFeatures::Scope use_sse2(SSE2);
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into eax.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &smi);
-
- // Check if the argument is a heap number and load its value into xmm0.
- Label slow;
- __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
-
- // Check if the argument is strictly positive. Note this also
- // discards NaN.
- __ xorpd(xmm1, xmm1);
- __ ucomisd(xmm0, xmm1);
- __ j(below_equal, &slow);
-
- // Do a truncating conversion.
- __ cvttsd2si(eax, Operand(xmm0));
-
- // Check if the result fits into a smi. Note this also checks for
- // 0x80000000 which signals a failed conversion.
- Label wont_fit_into_smi;
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &wont_fit_into_smi);
-
- // Smi tag and return.
- __ SmiTag(eax);
- __ bind(&smi);
- __ ret(2 * kPointerSize);
-
- // Check if the argument is < 2^kMantissaBits.
- Label already_round;
- __ bind(&wont_fit_into_smi);
- __ LoadPowerOf2(xmm1, ebx, HeapNumber::kMantissaBits);
- __ ucomisd(xmm0, xmm1);
- __ j(above_equal, &already_round);
-
- // Save a copy of the argument.
- __ movaps(xmm2, xmm0);
-
- // Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
- __ addsd(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
-
- // Compare the argument and the tentative result to get the right mask:
- // if xmm2 < xmm0:
- // xmm2 = 1...1
- // else:
- // xmm2 = 0...0
- __ cmpltsd(xmm2, xmm0);
-
- // Subtract 1 if the argument was less than the tentative result.
- __ LoadPowerOf2(xmm1, ebx, 0);
- __ andpd(xmm1, xmm2);
- __ subsd(xmm0, xmm1);
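- // E.g. for 1.5 the tentative result rounds up to 2.0; since 1.5 < 2.0
- // the mask is all ones, 1.0 is subtracted, and floor(1.5) = 1.0 results.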
-
- // Return a new heap number.
- __ AllocateHeapNumber(eax, ebx, edx, &slow);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- __ ret(2 * kPointerSize);
-
- // Return the argument (when it's an already round heap number).
- __ bind(&already_round);
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
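-
-// A standalone C++ sketch (illustrative name, not code from this file) of
-// the floor fast path above: adding and then subtracting 2^52 forces the
-// value to be rounded to an integer, and the cmpltsd-generated mask
-// subtracts 1 exactly when that rounding went up instead of down. Valid
-// for finite x with 0 < x < 2^52, as established by the checks above.
-static inline double FloorViaMantissaTrick(double x) {
- const double k2Pow52 = 4503599627370496.0; // 2^HeapNumber::kMantissaBits.
- double rounded = (x + k2Pow52) - k2Pow52; // addsd followed by subsd.
- if (x < rounded) rounded -= 1.0; // cmpltsd mask, andpd, subsd.
- return rounded;
-}
-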
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- const int argc = arguments().immediate();
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) {
- return Handle<Code>::null();
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(edx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the (only) argument into eax.
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(eax, &not_smi);
-
- // Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
- // otherwise.
- __ mov(ebx, eax);
- __ sar(ebx, kBitsPerInt - 1);
-
- // Do bitwise not or do nothing depending on ebx.
- __ xor_(eax, ebx);
-
- // Add 1 or do nothing depending on ebx.
- __ sub(eax, ebx);
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ j(negative, &slow);
-
- // Smi case done.
- __ ret(2 * kPointerSize);
-
- // Check if the argument is a heap number and load its exponent and
- // sign into ebx.
- __ bind(&not_smi);
- __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- __ test(ebx, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative_sign);
- __ ret(2 * kPointerSize);
-
- // If the argument is negative, clear the sign, and return a new
- // number.
- __ bind(&negative_sign);
- __ and_(ebx, ~HeapNumber::kSignMask);
- __ mov(ecx, FieldOperand(eax, HeapNumber::kMantissaOffset));
- __ AllocateHeapNumber(eax, edi, edx, &slow);
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ebx);
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- __ bind(&miss);
- // ecx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
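-
-// A C++ sketch of the branchless absolute value used in the smi fast path
-// above, with a 32-bit int standing in for a tagged smi; the name is
-// illustrative only. An arithmetic shift builds an all-ones or all-zeros
-// sign mask, then XOR and subtract negate negative values.
-static inline int32_t BranchlessAbs(int32_t x) {
- int32_t mask = x >> 31; // 0...0 if x >= 0, 1...1 (== -1) if x < 0.
- // Identity for x >= 0, two's-complement negation otherwise. The most
- // negative value stays negative, which is why the stub falls back to the
- // slow case for the most negative smi.
- return (x ^ mask) - mask;
-}
-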
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- ASSERT(optimization.is_simple_api_call());
- // Bail out if the object is a global object, as we don't want to
- // repatch it to the global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
-
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss_before_stack_reserved);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_const(), 1);
- __ IncrementCounter(counters->call_const_fast_api(), 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before calling any runtime function.
- __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
-
- // Check that the maps haven't changed and find the holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
- name, depth, &miss);
-
- // Copy the return address to the top of the stack.
- __ mov(eax, Operand(esp, 4 * kPointerSize));
- __ mov(Operand(esp, 0 * kPointerSize), eax);
-
- // esp[2 * kPointerSize] is uninitialized, and esp[3 * kPointerSize]
- // contains a duplicate of the return address that will be overwritten.
- GenerateFastApiCall(masm(), optimization, argc);
-
- __ bind(&miss);
- __ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
-
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(edx, &miss);
- }
-
- // Make sure that it's okay not to patch the on-stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(isolate()->counters()->call_const(), 1);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax,
- edi, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
- break;
-
- case STRING_CHECK:
- // Check that the object is a string.
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
- break;
-
- case SYMBOL_CHECK:
- // Check that the object is a symbol.
- __ CmpObjectType(edx, SYMBOL_TYPE, eax);
- __ j(not_equal, &miss);
- break;
-
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(edx, &fast);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
- break;
- }
- case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ cmp(edx, factory()->true_value());
- __ j(equal, &fast);
- __ cmp(edx, factory()->false_value());
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- eax, holder, ebx, edx, edi, name, &miss);
- break;
- }
- }
-
- __ jmp(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
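- // A contextual call site (a plain f() with an implicit receiver) is
- // invoked CALL_AS_FUNCTION; a property call site (o.f()) is invoked
- // CALL_AS_METHOD. The IC recorded this distinction in extra_state_.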
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Handle<JSFunction> function) {
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label success;
-
- CompileHandlerFrontend(object, holder, name, check, &success);
- __ bind(&success);
- CompileHandlerBackend(function);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state_);
- compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
- &miss);
-
- // Restore receiver.
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-
- // Check that the function really is a function.
- __ JumpIfSmi(eax, &miss);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Invoke the function.
- __ mov(edi, eax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- esp[0] : return address
- // -- esp[(argc - n) * 4] : arg[n] (zero-based)
- // -- ...
- // -- esp[(argc + 1) * 4] : receiver
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy.
- if (object->IsGlobalObject()) {
- __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
- }
-
- // Set up the context (function already in edi).
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Generate store field code. Trashes the name register.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- edx, ecx, ebx, edi,
- &miss);
- // Handle store cache miss.
- __ bind(&miss);
- __ mov(ecx, Immediate(name)); // restore name
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
- // Check that the maps haven't changed, preserving the value register.
- __ push(eax);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(receiver, edx, holder, ebx, eax, edi, name, &miss);
- __ pop(eax); // restore value
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
- __ pop(ebx); // remove the return address
- __ push(edx); // receiver
- __ push(Immediate(callback)); // callback info
- __ push(ecx); // name
- __ push(eax); // value
- __ push(ebx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ pop(eax);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
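- // This stub handles stores such as obj.name = value where name has a
- // JavaScript setter (set name(v) { ... }).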
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(eax);
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- __ push(edx);
- __ push(eax);
- ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If this snippet is generated only as a global code stub for
- // deoptimization, remember the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(eax);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed, preserving the name register.
- __ push(ecx);
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(receiver, edx, holder, ebx, ecx, edi, name, &miss);
- __ pop(ecx);
-
- GenerateStoreViaSetter(masm(), setter);
-
- __ bind(&miss);
- __ pop(ecx);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(edx, Handle<Map>(receiver->map()),
- &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(edx, ebx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ pop(ebx); // remove the return address
- __ push(edx); // receiver
- __ push(ecx); // name
- __ push(eax); // value
- __ push(Immediate(Smi::FromInt(strict_mode_)));
- __ push(ebx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss);
-
- // Compute the cell operand to use.
- __ mov(ebx, Immediate(cell));
- Operand cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted, and reintroducing the global would
- // require updating the property details in the global object's
- // property dictionary. We bail out to the runtime system to do that.
- __ cmp(cell_operand, factory()->the_hole_value());
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ mov(cell_operand, eax);
- // No write barrier here, because cells are always rescanned.
-
- // Return the value (register eax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- // Generate store field code. Trashes the name register.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- edx, ecx, ebx, edi,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_store_field(), 1);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_jsarray,
- elements_kind,
- grow_mode_).GetCode(isolate());
-
- __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
- __ JumpIfSmi(edx, &miss, Label::kNear);
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // edi: receiver->map().
- for (int i = 0; i < receiver_maps->length(); ++i) {
- __ cmp(edi, receiver_maps->at(i));
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i));
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- __ mov(ebx, Immediate(transitioned_maps->at(i)));
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
- __ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
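-
-// A C++ sketch (simplified types, illustrative name) of the dispatch
-// sequence emitted above: compare the receiver map with each known map;
-// on a match, optionally publish the transition target (the emitted code
-// leaves it in ebx for the handler) and select that handler, otherwise
-// report a miss.
-static int SelectKeyedStoreHandler(const void* receiver_map,
- const void* const* maps,
- const void* const* transitions,
- int count,
- const void** transition_out) {
- for (int i = 0; i < count; ++i) {
- if (receiver_map != maps[i]) continue; // cmp edi, map; j(not_equal).
- if (transitions[i] != 0) *transition_out = transitions[i]; // -> ebx.
- return i; // jmp handler_stubs->at(i).
- }
- return -1; // Fall through to KeyedStoreIC_Miss.
-}
-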
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Handle<GlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
-
- __ bind(&success);
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ mov(eax, isolate()->factory()->undefined_value());
- __ ret(0);
-
- // Return the generated code.
- return GetCode(Code::HANDLER_FRAGMENT, Code::NONEXISTENT, name);
-}
-
-
-Register* LoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
- return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
- return registers;
-}
-
-
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<String> name,
- Register name_reg,
- Label* miss) {
- __ cmp(name_reg, Immediate(name));
- __ j(not_equal, miss);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- __ push(edx);
- ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If this snippet is generated only as a global code stub for
- // deoptimization, remember the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> global,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
- bool is_dont_delete) {
- Label success, miss;
-
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
- // Get the value from the cell.
- if (Serializer::enabled()) {
- __ mov(eax, Immediate(cell));
- __ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset));
- } else {
- __ mov(eax, Operand::Cell(cell));
- }
-
- // Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ cmp(eax, factory()->the_hole_value());
- __ j(equal, &miss);
- } else if (FLAG_debug_code) {
- __ cmp(eax, factory()->the_hole_value());
- __ Check(not_equal, "DontDelete cells can't contain the hole");
- }
-
- HandlerFrontendFooter(&success, &miss);
- __ bind(&success);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1);
- // The code above already loads the result into the return register.
- __ ret(0);
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- ElementsKind elements_kind = receiver_map->elements_kind();
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
- Handle<Code> stub = KeyedLoadFastElementStub(
- receiver_map->instance_type() == JS_ARRAY_TYPE,
- elements_kind).GetCode(isolate());
- __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
- } else {
- Handle<Code> stub =
- KeyedLoadDictionaryElementStub().GetCode(isolate());
- __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
- }
-
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- Handle<String> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
- }
-
- __ JumpIfSmi(receiver(), &miss);
- Register map_reg = scratch1();
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int current = 0; current < receiver_count; ++current) {
- __ cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handlers->at(current));
- }
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), kind());
-
- // Return the generated code.
- InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(Code::IC_FRAGMENT, type, name, state);
-}
-
-
- // Specialized stub for constructing objects from functions which have only
- // simple assignments of the form this.x = ...; in their body.
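- // For example:
- //   function Point(x, y) { this.x = x; this.y = y; }
- // qualifies, while a constructor containing any other kind of statement
- // takes the generic construct stub path.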
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- edi : constructor
- // -- esp[0] : return address
- // -- esp[4] : last argument
- // -----------------------------------
- Label generic_stub_call;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are, jump to the generic constructor stub, which calls the actual
- // code for the function, thereby hitting the break points.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // edi: constructor
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
- // The smi check below catches both a NULL value and a smi.
- __ JumpIfSmi(ebx, &generic_stub_call);
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
- __ j(not_equal, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // ebx: initial map
- __ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
- __ Check(not_equal, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject on the heap by moving the new space allocation
- // top forward.
- // ebx: initial map
- ASSERT(function->has_initial_map());
- int instance_size = function->initial_map()->instance_size();
-#ifdef DEBUG
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ shl(ecx, kPointerSizeLog2);
- __ cmp(ecx, Immediate(instance_size));
- __ Check(equal, "Instance size of initial map changed.");
-#endif
- __ AllocateInNewSpace(instance_size, edx, ecx, no_reg,
- &generic_stub_call, NO_ALLOCATION_FLAGS);
-
- // Allocated the JSObject, now initialize the fields and add the heap tag.
- // ebx: initial map
- // edx: JSObject (untagged)
- __ mov(Operand(edx, JSObject::kMapOffset), ebx);
- __ mov(ebx, factory()->empty_fixed_array());
- __ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
- __ mov(Operand(edx, JSObject::kElementsOffset), ebx);
-
- // Push the allocated object to the stack. This is the object that will be
- // returned (after it is tagged).
- __ push(edx);
-
- // eax: argc
- // edx: JSObject (untagged)
- // Load the address of the first in-object property into edx.
- __ lea(edx, Operand(edx, JSObject::kHeaderSize));
- // Calculate the location of the first argument. The stack contains the
- // allocated object and the return address on top of the argc arguments.
- __ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
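- // ecx now points at argument 0, the deepest argument on the stack;
- // argument n therefore lives at ecx - n * kPointerSize, which is the
- // arg_number * -kPointerSize displacement used below.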
-
- // Use edi for holding undefined which is used in several places below.
- __ mov(edi, factory()->undefined_value());
-
- // eax: argc
- // ecx: first argument
- // edx: first in-object property of the JSObject
- // edi: undefined
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- // Check if the argument assigned to the property is actually passed.
- // If the argument is not passed, the property is set to undefined;
- // otherwise it is found on the stack.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ mov(ebx, edi);
- __ cmp(eax, arg_number);
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatures::Scope use_cmov(CMOV);
- __ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
- } else {
- Label not_passed;
- __ j(below_equal, &not_passed);
- __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
- __ bind(&not_passed);
- }
- // Store value in the property.
- __ mov(Operand(edx, i * kPointerSize), ebx);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
- isolate());
- __ mov(Operand(edx, i * kPointerSize), Immediate(constant));
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ mov(Operand(edx, i * kPointerSize), edi);
- }
-
- // Move argc to ebx and retrieve and tag the JSObject to return.
- __ mov(ebx, eax);
- __ pop(eax);
- __ or_(eax, Immediate(kHeapObjectTag));
-
- // Remove caller arguments and receiver from the stack and return.
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
- __ push(ecx);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ IncrementCounter(counters->constructed_objects_stub(), 1);
- __ ret(0);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(code, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
- __ JumpIfNotSmi(ecx, &miss_force_generic);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(edx);
- __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
- // Pop receiver before returning.
- __ pop(edx);
- __ ret(0);
-
- __ bind(&slow);
- __ pop(edx);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Handle<Code> miss_force_generic_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_force_generic_ic, RelocInfo::CODE_TARGET);
-}
-
-
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch,
- XMMRegister xmm_scratch0,
- XMMRegister xmm_scratch1,
- Label* fail) {
- // Check that the key is a smi or, when SSE2 is available, a heap number
- // holding a value that converts exactly to a smi; branch to fail otherwise.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- Label key_ok;
- __ JumpIfSmi(key, &key_ok);
- __ cmp(FieldOperand(key, HeapObject::kMapOffset),
- Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
- __ j(not_equal, fail);
- __ movdbl(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
- __ cvttsd2si(scratch, Operand(xmm_scratch0));
- __ cvtsi2sd(xmm_scratch1, scratch);
- __ ucomisd(xmm_scratch1, xmm_scratch0);
- __ j(not_equal, fail);
- __ j(parity_even, fail); // NaN.
- // Check if the key fits in the smi range.
- __ cmp(scratch, 0xc0000000);
- __ j(sign, fail);
- __ SmiTag(scratch);
- __ mov(key, scratch);
- __ bind(&key_ok);
- } else {
- __ JumpIfNotSmi(key, fail);
- }
-}
-
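-
-// A C++ sketch (illustrative name) of the SSE2 path above: the double is
-// truncated with cvttsd2si, converted back, and required to round-trip
-// exactly; the range test mirrors the cmp against 0xc0000000, which
-// accepts exactly the 31-bit smi range. The range is checked first here
-// only to keep the cast well defined in C++.
-static inline bool DoubleKeyToSmiValue(double key, int32_t* out) {
- if (!(key >= -1073741824.0 && key < 1073741824.0)) return false; // NaN too.
- int32_t truncated = static_cast<int32_t>(key); // cvttsd2si.
- if (static_cast<double>(truncated) != key) return false; // Not an integer.
- *out = truncated; // The stub then re-tags the value (SmiTag).
- return true;
-}
-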
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, slow, check_heap_number;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(ecx, FieldOperand(edi, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- __ JumpIfNotSmi(eax, &slow);
- } else {
- __ JumpIfNotSmi(eax, &check_heap_number);
- }
-
- // smi case
- __ mov(ebx, eax); // Preserve the value in eax as the return value.
- __ SmiUntag(ebx);
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // edi: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- __ ClampUint8(ebx);
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ push(ebx);
- __ fild_s(Operand(esp, 0));
- __ pop(ebx);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ fstp_s(Operand(edi, ecx, times_2, 0));
- } else { // elements_kind == EXTERNAL_DOUBLE_ELEMENTS.
- __ fstp_d(Operand(edi, ecx, times_4, 0));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0); // Return the original value.
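- // Note on the scale factors above: except in the byte cases (which
- // SmiUntag the key first), ecx still holds the key as a smi, i.e. the
- // index already multiplied by two. Thus times_1 addresses 2-byte
- // elements, times_2 addresses 4-byte elements, and times_4 addresses
- // 8-byte doubles.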
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- __ bind(&check_heap_number);
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(not_equal, &slow);
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // edi: base pointer of external storage
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fstp_s(Operand(edi, ecx, times_2, 0));
- __ ret(0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fstp_d(Operand(edi, ecx, times_4, 0));
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
-
- // For the moment, we make the slow runtime call on processors that
- // don't support SSE2. The code in IntegerConvert (code-stubs-ia32.cc) is
- // roughly what is needed here, though the conversion failure case does
- // not need to be handled.
- if (CpuFeatures::IsSupported(SSE2)) {
- if ((elements_kind == EXTERNAL_INT_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) &&
- CpuFeatures::IsSupported(SSE3)) {
- CpuFeatures::Scope scope(SSE3);
- // fisttp stores values as signed integers. To represent the
- // entire range of int and unsigned int arrays, store as a
- // 64-bit int and discard the high 32 bits.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ sub(esp, Immediate(2 * kPointerSize));
- __ fisttp_d(Operand(esp, 0));
-
- // If conversion failed (NaN, infinity, or a number outside
- // signed int64 range), the result is 0x8000000000000000, and
- // we must handle this case in the runtime.
- Label ok;
- __ cmp(Operand(esp, kPointerSize), Immediate(0x80000000u));
- __ j(not_equal, &ok);
- __ cmp(Operand(esp, 0), Immediate(0));
- __ j(not_equal, &ok);
- __ add(esp, Immediate(2 * kPointerSize)); // Restore the stack.
- __ jmp(&slow);
-
- __ bind(&ok);
- __ pop(ebx);
- __ add(esp, Immediate(kPointerSize));
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- } else {
- ASSERT(CpuFeatures::IsSupported(SSE2));
- CpuFeatures::Scope scope(SSE2);
- __ cvttsd2si(ebx, FieldOperand(eax, HeapNumber::kValueOffset));
- __ cmp(ebx, 0x80000000u);
- __ j(equal, &slow);
- // ebx: untagged integer value
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- __ ClampUint8(ebx);
- // Fall through.
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ SmiUntag(ecx);
- __ mov_b(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(Operand(edi, ecx, times_2, 0), ebx);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- __ ret(0); // Return original value.
- }
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_store_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, grow, slow, transition_elements_kind;
- Label check_capacity, prepare_slow, finish_store, commit_backing_store;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(eax, &transition_elements_kind);
- }
-
- // Get the elements array and make sure it is a fast element array,
- // i.e. not copy-on-write ('cow').
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- if (is_js_array) {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
- __ j(above_equal, &miss_force_generic);
- }
-
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ j(not_equal, &miss_force_generic);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- // ecx is a smi; use times_half_pointer_size instead of
- // times_pointer_size.
- __ mov(FieldOperand(edi,
- ecx,
- times_half_pointer_size,
- FixedArray::kHeaderSize), eax);
- } else {
- ASSERT(IsFastObjectElementsKind(elements_kind));
- // Do the store and update the write barrier.
- // ecx is a smi; use times_half_pointer_size instead of
- // times_pointer_size.
- __ lea(ecx, FieldOperand(edi,
- ecx,
- times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(Operand(ecx, 0), eax);
- // Make sure to preserve the value in register eax.
- __ mov(ebx, eax);
- __ RecordWrite(edi, ecx, ebx, kDontSaveFPRegs);
- }
-
- // Done.
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic_force_generic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- // Handle transition to other elements kinds without using the generic stub.
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Handle transition requiring the array to grow.
- __ bind(&grow);
-
- // Make sure the array is growing by exactly one element; anything else
- // must be handled by the runtime. The flags are still set by the earlier
- // bounds-check compare, so not_equal here means key > length.
- __ j(not_equal, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(not_equal, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
- // Restore the key, which is known to be the array length (smi zero); it
- // was clobbered as a scratch register by the allocation above.
- __ mov(ecx, Immediate(0));
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Initialize the new backing store: set its map and length, and fill it
- // with hole values.
- __ mov(FieldOperand(edi, JSObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ mov(ebx, Immediate(masm->isolate()->factory()->the_hole_value()));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ mov(FieldOperand(edi, FixedArray::SizeFor(i)), ebx);
- }
-
- // Store the element at index zero.
- __ mov(FieldOperand(edi, FixedArray::SizeFor(0)), eax);
-
- // Install the new backing store in the JSArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
- __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ ret(0);
-
- __ bind(&check_capacity);
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_cow_array_map()));
- __ j(equal, &miss_force_generic);
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&finish_store);
-
- __ bind(&prepare_slow);
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, grow, slow;
- Label check_capacity, prepare_slow, finish_store, commit_backing_store;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(edi);
-
- if (is_js_array) {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
- __ j(above_equal, &miss_force_generic);
- }
-
- __ bind(&finish_store);
- __ StoreNumberToDoubleElements(eax, edi, ecx, edx, xmm0,
- &transition_elements_kind, true);
- __ ret(0);
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic_force_generic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- // Handle transition to other elements kinds without using the generic stub.
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Handle transition requiring the array to grow.
- __ bind(&grow);
-
- // Make sure the array is growing by exactly one element; anything else
- // must be handled by the runtime. The flags are still set by the earlier
- // bounds-check compare, so not_equal here means key > length.
- __ j(not_equal, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(eax, &value_is_smi);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
- __ j(not_equal, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(not_equal, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
-
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Initialize the new FixedDoubleArray.
- __ mov(FieldOperand(edi, JSObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_double_array_map()));
- __ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
- Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-
- __ StoreNumberToDoubleElements(eax, edi, ecx, ebx, xmm0,
- &transition_elements_kind, true);
-
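- // Fill the remaining preallocated slots with holes. Each hole is written
- // as the two 32-bit halves of a reserved NaN bit pattern that ordinary
- // arithmetic never produces, so the runtime can tell holes from values.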
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(FieldOperand(edi, offset), Immediate(kHoleNanLower32));
- __ mov(FieldOperand(edi, offset + kPointerSize),
- Immediate(kHoleNanUpper32));
- }
-
- // Install the new backing store in the JSArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
- __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ ret(0);
-
- __ bind(&check_capacity);
- // eax: value
- // ecx: key
- // edx: receiver
- // edi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmp(ecx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ jmp(&finish_store);
-
- __ bind(&prepare_slow);
- // Restore the key, which is known to be the array length.
- __ mov(ecx, Immediate(0));
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_IA32