Diffstat (limited to 'src/3rdparty/v8/src/x64')
-rw-r--r--  src/3rdparty/v8/src/x64/assembler-x64-inl.h  521
-rw-r--r--  src/3rdparty/v8/src/x64/assembler-x64.cc  3064
-rw-r--r--  src/3rdparty/v8/src/x64/assembler-x64.h  1678
-rw-r--r--  src/3rdparty/v8/src/x64/builtins-x64.cc  1884
-rw-r--r--  src/3rdparty/v8/src/x64/code-stubs-x64.cc  6940
-rw-r--r--  src/3rdparty/v8/src/x64/code-stubs-x64.h  623
-rw-r--r--  src/3rdparty/v8/src/x64/codegen-x64.cc  785
-rw-r--r--  src/3rdparty/v8/src/x64/codegen-x64.h  108
-rw-r--r--  src/3rdparty/v8/src/x64/cpu-x64.cc  89
-rw-r--r--  src/3rdparty/v8/src/x64/debug-x64.cc  354
-rw-r--r--  src/3rdparty/v8/src/x64/deoptimizer-x64.cc  1076
-rw-r--r--  src/3rdparty/v8/src/x64/disasm-x64.cc  1869
-rw-r--r--  src/3rdparty/v8/src/x64/frames-x64.cc  45
-rw-r--r--  src/3rdparty/v8/src/x64/frames-x64.h  122
-rw-r--r--  src/3rdparty/v8/src/x64/full-codegen-x64.cc  4594
-rw-r--r--  src/3rdparty/v8/src/x64/ic-x64.cc  1690
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-codegen-x64.cc  5846
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-codegen-x64.h  450
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc  322
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.h  74
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-x64.cc  2438
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-x64.h  2641
-rw-r--r--  src/3rdparty/v8/src/x64/macro-assembler-x64.cc  4637
-rw-r--r--  src/3rdparty/v8/src/x64/macro-assembler-x64.h  1508
-rw-r--r--  src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc  1553
-rw-r--r--  src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h  304
-rw-r--r--  src/3rdparty/v8/src/x64/simulator-x64.cc  27
-rw-r--r--  src/3rdparty/v8/src/x64/simulator-x64.h  72
-rw-r--r--  src/3rdparty/v8/src/x64/stub-cache-x64.cc  3613
29 files changed, 0 insertions, 48927 deletions
diff --git a/src/3rdparty/v8/src/x64/assembler-x64-inl.h b/src/3rdparty/v8/src/x64/assembler-x64-inl.h
deleted file mode 100644
index 67acbf0..0000000
--- a/src/3rdparty/v8/src/x64/assembler-x64-inl.h
+++ /dev/null
@@ -1,521 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_ASSEMBLER_X64_INL_H_
-#define V8_X64_ASSEMBLER_X64_INL_H_
-
-#include "x64/assembler-x64.h"
-
-#include "cpu.h"
-#include "debug.h"
-#include "v8memory.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler
-
-
-static const byte kCallOpcode = 0xE8;
-
-
-void Assembler::emitl(uint32_t x) {
- Memory::uint32_at(pc_) = x;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
- Memory::uint64_at(pc_) = x;
- if (!RelocInfo::IsNone(rmode)) {
- RecordRelocInfo(rmode, x);
- }
- pc_ += sizeof(uint64_t);
-}
-
-
-void Assembler::emitw(uint16_t x) {
- Memory::uint16_at(pc_) = x;
- pc_ += sizeof(uint16_t);
-}
-
-
-void Assembler::emit_code_target(Handle<Code> target,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
- RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt());
- } else {
- RecordRelocInfo(rmode);
- }
- int current = code_targets_.length();
- if (current > 0 && code_targets_.last().is_identical_to(target)) {
- // Optimization if we keep jumping to the same code target.
- emitl(current - 1);
- } else {
- code_targets_.Add(target);
- emitl(current);
- }
-}
-
-
-void Assembler::emit_rex_64(Register reg, Register rm_reg) {
- emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
-}
-
-
-void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
- emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
-}
-
-
-void Assembler::emit_rex_64(Register reg, XMMRegister rm_reg) {
- emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
-}
-
-
-void Assembler::emit_rex_64(Register reg, const Operand& op) {
- emit(0x48 | reg.high_bit() << 2 | op.rex_);
-}
-
-
-void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
- emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
-}
-
-
-void Assembler::emit_rex_64(Register rm_reg) {
- ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
- emit(0x48 | rm_reg.high_bit());
-}
-
-
-void Assembler::emit_rex_64(const Operand& op) {
- emit(0x48 | op.rex_);
-}
-
-
-void Assembler::emit_rex_32(Register reg, Register rm_reg) {
- emit(0x40 | reg.high_bit() << 2 | rm_reg.high_bit());
-}
-
-
-void Assembler::emit_rex_32(Register reg, const Operand& op) {
- emit(0x40 | reg.high_bit() << 2 | op.rex_);
-}
-
-
-void Assembler::emit_rex_32(Register rm_reg) {
- emit(0x40 | rm_reg.high_bit());
-}
-
-
-void Assembler::emit_rex_32(const Operand& op) {
- emit(0x40 | op.rex_);
-}
-
-
-void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
- byte rex_bits = reg.high_bit() << 2 | rm_reg.high_bit();
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
- byte rex_bits = reg.high_bit() << 2 | op.rex_;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(Register reg, XMMRegister base) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
- if (rex_bits != 0) emit(0x40 | rex_bits);
-}
-
-
-void Assembler::emit_optional_rex_32(Register rm_reg) {
- if (rm_reg.high_bit()) emit(0x41);
-}
-
-
-void Assembler::emit_optional_rex_32(const Operand& op) {
- if (op.rex_ != 0) emit(0x40 | op.rex_);
-}
-
-
-Address Assembler::target_address_at(Address pc) {
- return Memory::int32_at(pc) + pc + 4;
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
- CPU::FlushICache(pc, sizeof(int32_t));
-}
-
-
-Address Assembler::target_address_from_return_address(Address pc) {
- return pc - kCallTargetAddressOffset;
-}
-
-
-Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
- return code_targets_[Memory::int32_at(pc)];
-}
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-// The modes possibly affected by apply must be in kApplyMask.
-void RelocInfo::apply(intptr_t delta) {
- if (IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- Memory::Address_at(pc_) += static_cast<int32_t>(delta);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (IsCodeTarget(rmode_)) {
- Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
- CPU::FlushICache(pc_, sizeof(int32_t));
- } else if (rmode_ == CODE_AGE_SEQUENCE) {
- if (*pc_ == kCallOpcode) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= static_cast<int32_t>(delta); // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- }
- }
-}
-
-
-Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- if (IsCodeTarget(rmode_)) {
- return Assembler::target_address_at(pc_);
- } else {
- return Memory::Address_at(pc_);
- }
-}
-
-
-Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
- || rmode_ == EMBEDDED_OBJECT
- || rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(pc_);
-}
-
-
-int RelocInfo::target_address_size() {
- if (IsCodedSpecially()) {
- return Assembler::kSpecialTargetSize;
- } else {
- return kPointerSize;
- }
-}
-
-
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- if (IsCodeTarget(rmode_)) {
- Assembler::set_target_address_at(pc_, target);
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
- } else {
- Memory::Address_at(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
- }
-}
-
-
-Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return Memory::Object_at(pc_);
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- if (rmode_ == EMBEDDED_OBJECT) {
- return Memory::Object_Handle_at(pc_);
- } else {
- return origin->code_target_object_handle_at(pc_);
- }
-}
-
-
-Object** RelocInfo::target_object_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(pc_);
-}
-
-
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
-}
-
-
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Memory::Object_at(pc_) = target;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER &&
- host() != NULL &&
- target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
- }
-}
-
-
-Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = Memory::Address_at(pc_);
- return Handle<JSGlobalPropertyCell>(
- reinterpret_cast<JSGlobalPropertyCell**>(address));
-}
-
-
-JSGlobalPropertyCell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
-}
-
-
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
- WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
- Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
- Memory::Address_at(pc_) = address;
- CPU::FlushICache(pc_, sizeof(Address));
- if (mode == UPDATE_WRITE_BARRIER &&
- host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
- }
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
- // The recognized call sequence is:
- // movq(kScratchRegister, immediate64); call(kScratchRegister);
- // It only needs to be distinguished from a return sequence
- // movq(rsp, rbp); pop(rbp); ret(n); int3 *6
- // The 11th byte is int3 (0xCC) in the return sequence and
- // REX.WB (0x48+register bit) for the call sequence.
-#ifdef ENABLE_DEBUGGER_SUPPORT
- return pc_[10] != 0xCC;
-#else
- return false;
-#endif
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- return !Assembler::IsNop(pc());
-}
-
-
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- ASSERT(*pc_ == kCallOpcode);
- return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(*pc_ == kCallOpcode);
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
-}
-
-
-Address RelocInfo::call_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return Memory::Address_at(
- pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
- target;
- CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
- sizeof(Address));
- if (host() != NULL) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
-Object* RelocInfo::call_object() {
- return *call_object_address();
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
- *call_object_address() = target;
-}
-
-
-Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
- (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
- return reinterpret_cast<Object**>(
- pc_ + Assembler::kPatchReturnSequenceAddressOffset);
-}
-
-
-void RelocInfo::Visit(ObjectVisitor* visitor) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- visitor->VisitGlobalPropertyCell(this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // TODO(isolates): Get a cached isolate below.
- } else if (((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence())) &&
- Isolate::Current()->debug()->has_break_points()) {
- visitor->VisitDebugTarget(this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- visitor->VisitRuntimeEntry(this);
- }
-}
-
-
-template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
- RelocInfo::Mode mode = rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- StaticVisitor::VisitEmbeddedPointer(heap, this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeTarget(mode)) {
- StaticVisitor::VisitCodeTarget(heap, this);
- } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
- StaticVisitor::VisitGlobalPropertyCell(heap, this);
- } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- StaticVisitor::VisitExternalReference(this);
- CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- } else if (heap->isolate()->debug()->has_break_points() &&
- ((RelocInfo::IsJSReturn(mode) &&
- IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(mode) &&
- IsPatchedDebugBreakSlotSequence()))) {
- StaticVisitor::VisitDebugTarget(heap, this);
-#endif
- } else if (mode == RelocInfo::RUNTIME_ENTRY) {
- StaticVisitor::VisitRuntimeEntry(this);
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand
-
-void Operand::set_modrm(int mod, Register rm_reg) {
- ASSERT(is_uint2(mod));
- buf_[0] = mod << 6 | rm_reg.low_bits();
- // Set REX.B to the high bit of rm.code().
- rex_ |= rm_reg.high_bit();
-}
-
-
-void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
- ASSERT(len_ == 1);
- ASSERT(is_uint2(scale));
- // Use SIB with no index register only for base rsp or r12. Otherwise we
- // would skip the SIB byte entirely.
- ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
- buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
- rex_ |= index.high_bit() << 1 | base.high_bit();
- len_ = 2;
-}
-
-void Operand::set_disp8(int disp) {
- ASSERT(is_int8(disp));
- ASSERT(len_ == 1 || len_ == 2);
- int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int8_t);
-}
-
-void Operand::set_disp32(int disp) {
- ASSERT(len_ == 1 || len_ == 2);
- int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
- *p = disp;
- len_ += sizeof(int32_t);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_ASSEMBLER_X64_INL_H_
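
For readers following the emit_rex_64 / emit_optional_rex_32 helpers deleted above: the byte they build is the standard x86-64 REX prefix, 0100WRXB, where W selects 64-bit operand size and R, X, B extend the ModR/M reg field, the SIB index, and the ModR/M rm / SIB base so the encoder can reach r8-r15. The following is a minimal standalone C++ sketch of that bit layout; the make_rex helper and the two example operand pairings are illustrative only and are not part of the deleted patch.

#include <cstdint>
#include <cstdio>

// x86-64 REX prefix layout: 0100WRXB.
//   W - 64-bit operand size, R - extends ModR/M reg,
//   X - extends SIB index,   B - extends ModR/M rm / SIB base.
static uint8_t make_rex(bool w, bool r, bool x, bool b) {
  return 0x40 | (w << 3) | (r << 2) | (x << 1) | (b << 0);
}

int main() {
  // movq rax, r9: 64-bit move where the rm register (r9) is in the extended
  // bank, so W=1 and B=1 -- the 0x49 byte emit_rex_64(rax, r9) would produce.
  std::printf("%#x\n", static_cast<unsigned>(make_rex(true, false, false, true)));   // 0x49
  // movl r10d, edx: 32-bit move where only the reg field (r10) needs REX.R,
  // matching the optional 0x44 prefix from emit_optional_rex_32(r10, rdx).
  std::printf("%#x\n", static_cast<unsigned>(make_rex(false, true, false, false)));  // 0x44
  return 0;
}
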
diff --git a/src/3rdparty/v8/src/x64/assembler-x64.cc b/src/3rdparty/v8/src/x64/assembler-x64.cc
deleted file mode 100644
index 0ac0862..0000000
--- a/src/3rdparty/v8/src/x64/assembler-x64.cc
+++ /dev/null
@@ -1,3064 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "macro-assembler.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Implementation of CpuFeatures
-
-
-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
-uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
-uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
-
-
-ExternalReference ExternalReference::cpu_features() {
- ASSERT(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-
-void CpuFeatures::Probe() {
- ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
-#ifdef DEBUG
- initialized_ = true;
-#endif
- supported_ = kDefaultCpuFeatures;
- if (Serializer::enabled()) {
- supported_ |= OS::CpuFeaturesImpliedByPlatform();
- return; // No features if we might serialize.
- }
-
- const int kBufferSize = 4 * KB;
- VirtualMemory* memory = new VirtualMemory(kBufferSize);
- if (!memory->IsReserved()) {
- delete memory;
- return;
- }
- ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
- if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
- delete memory;
- return;
- }
-
- Assembler assm(NULL, memory->address(), kBufferSize);
- Label cpuid, done;
-#define __ assm.
- // Save old rsp, since we are going to modify the stack.
- __ push(rbp);
- __ pushfq();
- __ push(rdi);
- __ push(rcx);
- __ push(rbx);
- __ movq(rbp, rsp);
-
- // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
- __ pushfq();
- __ pop(rax);
- __ movq(rdx, rax);
- __ xor_(rax, Immediate(0x200000)); // Flip bit 21.
- __ push(rax);
- __ popfq();
- __ pushfq();
- __ pop(rax);
- __ xor_(rax, rdx); // Different if CPUID is supported.
- __ j(not_zero, &cpuid);
-
- // CPUID not supported. Clear the supported features in rax.
- __ xor_(rax, rax);
- __ jmp(&done);
-
- // Invoke CPUID with 1 in eax to get feature information in
- // ecx:edx. Temporarily enable CPUID support because we know it's
- // safe here.
- __ bind(&cpuid);
- __ movl(rax, Immediate(1));
- supported_ = kDefaultCpuFeatures | (1 << CPUID);
- { Scope fscope(CPUID);
- __ cpuid();
- // Move the result from ecx:edx to rdi.
- __ movl(rdi, rdx); // Zero-extended to 64 bits.
- __ shl(rcx, Immediate(32));
- __ or_(rdi, rcx);
-
- // Get the sahf supported flag, from CPUID(0x80000001)
- __ movq(rax, 0x80000001, RelocInfo::NONE64);
- __ cpuid();
- }
- supported_ = kDefaultCpuFeatures;
-
- // Put the CPU flags in rax.
- // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID).
- __ movl(rax, Immediate(1));
- __ and_(rcx, rax); // Bit 0 is set if SAHF instruction supported.
- __ not_(rax);
- __ and_(rax, rdi);
- __ or_(rax, rcx);
- __ or_(rax, Immediate(1 << CPUID));
-
- // Done.
- __ bind(&done);
- __ movq(rsp, rbp);
- __ pop(rbx);
- __ pop(rcx);
- __ pop(rdi);
- __ popfq();
- __ pop(rbp);
- __ ret(0);
-#undef __
-
- typedef uint64_t (*F0)();
- F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
- supported_ = probe();
- found_by_runtime_probing_ = supported_;
- found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
- uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
- supported_ |= os_guarantees;
- found_by_runtime_probing_ &= ~os_guarantees;
- // SSE2 and CMOV must be available on an X64 CPU.
- ASSERT(IsSupported(CPUID));
- ASSERT(IsSupported(SSE2));
- ASSERT(IsSupported(CMOV));
-
- delete memory;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Load register with immediate 64 and call through a register instructions
- // takes up 13 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 13;
- int code_size = kCallCodeSize + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc_, code_size);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->movq(r10, target, RelocInfo::NONE64);
- patcher.masm()->call(r10);
-
- // Check that the size of the code generated is as expected.
- ASSERT_EQ(kCallCodeSize,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-}
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- for (int i = 0; i < instruction_count; i++) {
- *(pc_ + i) = *(instructions + i);
- }
-
- // Indicate that code has changed.
- CPU::FlushICache(pc_, instruction_count);
-}
-
-
-// -----------------------------------------------------------------------------
-// Register constants.
-
-const int
- Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
- // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
- 0, 3, 2, 1, 7, 8, 9, 11, 14, 15
-};
-
-const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
- 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, 8, 9
-};
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand
-
-Operand::Operand(Register base, int32_t disp) : rex_(0) {
- len_ = 1;
- if (base.is(rsp) || base.is(r12)) {
- // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
- set_sib(times_1, rsp, base);
- }
-
- if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
- set_modrm(0, base);
- } else if (is_int8(disp)) {
- set_modrm(1, base);
- set_disp8(disp);
- } else {
- set_modrm(2, base);
- set_disp32(disp);
- }
-}
-
-
-Operand::Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp) : rex_(0) {
- ASSERT(!index.is(rsp));
- len_ = 1;
- set_sib(scale, index, base);
- if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
- // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
- // possibly set by set_sib.
- set_modrm(0, rsp);
- } else if (is_int8(disp)) {
- set_modrm(1, rsp);
- set_disp8(disp);
- } else {
- set_modrm(2, rsp);
- set_disp32(disp);
- }
-}
-
-
-Operand::Operand(Register index,
- ScaleFactor scale,
- int32_t disp) : rex_(0) {
- ASSERT(!index.is(rsp));
- len_ = 1;
- set_modrm(0, rsp);
- set_sib(scale, index, rbp);
- set_disp32(disp);
-}
-
-
-Operand::Operand(const Operand& operand, int32_t offset) {
- ASSERT(operand.len_ >= 1);
- // Operand encodes REX ModR/M [SIB] [Disp].
- byte modrm = operand.buf_[0];
- ASSERT(modrm < 0xC0); // Disallow mode 3 (register target).
- bool has_sib = ((modrm & 0x07) == 0x04);
- byte mode = modrm & 0xC0;
- int disp_offset = has_sib ? 2 : 1;
- int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
- // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
- // displacement.
- bool is_baseless = (mode == 0) && (base_reg == 0x05); // No base or RIP base.
- int32_t disp_value = 0;
- if (mode == 0x80 || is_baseless) {
- // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
- disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
- } else if (mode == 0x40) {
- // Mode 1: Byte displacement.
- disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
- }
-
- // Write new operand with same registers, but with modified displacement.
- ASSERT(offset >= 0 ? disp_value + offset > disp_value
- : disp_value + offset < disp_value); // No overflow.
- disp_value += offset;
- rex_ = operand.rex_;
- if (!is_int8(disp_value) || is_baseless) {
- // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
- buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
- len_ = disp_offset + 4;
- Memory::int32_at(&buf_[disp_offset]) = disp_value;
- } else if (disp_value != 0 || (base_reg == 0x05)) {
- // Need 8 bits of displacement.
- buf_[0] = (modrm & 0x3f) | 0x40; // Mode 1.
- len_ = disp_offset + 1;
- buf_[disp_offset] = static_cast<byte>(disp_value);
- } else {
- // Need no displacement.
- buf_[0] = (modrm & 0x3f); // Mode 0.
- len_ = disp_offset;
- }
- if (has_sib) {
- buf_[1] = operand.buf_[1];
- }
-}
-
-
-bool Operand::AddressUsesRegister(Register reg) const {
- int code = reg.code();
- ASSERT((buf_[0] & 0xC0) != 0xC0); // Always a memory operand.
- // Start with only low three bits of base register. Initial decoding doesn't
- // distinguish on the REX.B bit.
- int base_code = buf_[0] & 0x07;
- if (base_code == rsp.code()) {
- // SIB byte present in buf_[1].
- // Check the index register from the SIB byte + REX.X prefix.
- int index_code = ((buf_[1] >> 3) & 0x07) | ((rex_ & 0x02) << 2);
- // Index code (including REX.X) of 0x04 (rsp) means no index register.
- if (index_code != rsp.code() && index_code == code) return true;
- // Add REX.B to get the full base register code.
- base_code = (buf_[1] & 0x07) | ((rex_ & 0x01) << 3);
- // A base register of 0x05 (rbp) with mod = 0 means no base register.
- if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
- return code == base_code;
- } else {
- // A base register with low bits of 0x05 (rbp or r13) and mod = 0 means
- // no base register.
- if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
- base_code |= ((rex_ & 0x01) << 3);
- return code == base_code;
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-#ifdef GENERATED_CODE_COVERAGE
-static void InitCoverageLog();
-#endif
-
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
- code_targets_(100),
- positions_recorder_(this) {
- // Clear the buffer in debug mode unless it was provided by the
- // caller in which case we can't be sure it's okay to overwrite
- // existing code in it.
-#ifdef DEBUG
- if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size_); // int3
- }
-#endif
-
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-
-
-#ifdef GENERATED_CODE_COVERAGE
- InitCoverageLog();
-#endif
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
- // Finalize code (at this point overflow() may be true, but the gap ensures
- // that we are still not overlapping instructions and relocation info).
- ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Set up code descriptor.
- desc->buffer = buffer_;
- desc->buffer_size = buffer_size_;
- desc->instr_size = pc_offset();
- ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system.
- desc->reloc_size =
- static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
- desc->origin = this;
-}
-
-
-void Assembler::Align(int m) {
- ASSERT(IsPowerOf2(m));
- int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
- Nop(delta);
-}
-
-
-void Assembler::CodeTargetAlign() {
- Align(16); // Preferred alignment of jump targets on x64.
-}
-
-
-bool Assembler::IsNop(Address addr) {
- Address a = addr;
- while (*a == 0x66) a++;
- if (*a == 0x90) return true;
- if (a[0] == 0xf && a[1] == 0x1f) return true;
- return false;
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
- ASSERT(!L->is_bound()); // Label may only be bound once.
- ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid.
- if (L->is_linked()) {
- int current = L->pos();
- int next = long_at(current);
- while (next != current) {
- // Relative address, relative to point after address.
- int imm32 = pos - (current + sizeof(int32_t));
- long_at_put(current, imm32);
- current = next;
- next = long_at(next);
- }
- // Fix up last fixup on linked list.
- int last_imm32 = pos - (current + sizeof(int32_t));
- long_at_put(current, last_imm32);
- }
- while (L->is_near_linked()) {
- int fixup_pos = L->near_link_pos();
- int offset_to_next =
- static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
- ASSERT(offset_to_next <= 0);
- int disp = pos - (fixup_pos + sizeof(int8_t));
- CHECK(is_int8(disp));
- set_byte_at(fixup_pos, disp);
- if (offset_to_next < 0) {
- L->link_to(fixup_pos + offset_to_next, Label::kNear);
- } else {
- L->UnuseNear();
- }
- }
- L->bind_to(pos);
-}
-
-
-void Assembler::bind(Label* L) {
- bind_to(L, pc_offset());
-}
-
-
-void Assembler::GrowBuffer() {
- ASSERT(buffer_overflow());
- if (!own_buffer_) FATAL("external code buffer is too small");
-
- // Compute new buffer size.
- CodeDesc desc; // the new buffer
- if (buffer_size_ < 4*KB) {
- desc.buffer_size = 4*KB;
- } else {
- desc.buffer_size = 2*buffer_size_;
- }
- // Some internal data structures overflow for very large buffers,
- // they must ensure that kMaximalBufferSize is not too large.
- if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > HEAP->MaxOldGenerationSize())) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
- }
-
- // Set up new buffer.
- desc.buffer = NewArray<byte>(desc.buffer_size);
- desc.instr_size = pc_offset();
- desc.reloc_size =
- static_cast<int>((buffer_ + buffer_size_) - (reloc_info_writer.pos()));
-
- // Clear the buffer in debug mode. Use 'int3' instructions to make
- // sure to get into problems if we ever run uninitialized code.
-#ifdef DEBUG
- memset(desc.buffer, 0xCC, desc.buffer_size);
-#endif
-
- // Copy the data.
- intptr_t pc_delta = desc.buffer - buffer_;
- intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
- (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
-
- // Switch buffers.
- if (isolate() != NULL &&
- isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- buffer_ = desc.buffer;
- buffer_size_ = desc.buffer_size;
- pc_ += pc_delta;
- reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.last_pc() + pc_delta);
-
- // Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- intptr_t* p = reinterpret_cast<intptr_t*>(it.rinfo()->pc());
- if (*p != 0) { // 0 means uninitialized.
- *p += pc_delta;
- }
- }
- }
-
- ASSERT(!buffer_overflow());
-}
-
-
-void Assembler::emit_operand(int code, const Operand& adr) {
- ASSERT(is_uint3(code));
- const unsigned length = adr.len_;
- ASSERT(length > 0);
-
- // Emit updated ModR/M byte containing the given register.
- ASSERT((adr.buf_[0] & 0x38) == 0);
- pc_[0] = adr.buf_[0] | code << 3;
-
- // Emit the rest of the encoded operand.
- for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
- pc_ += length;
-}
-
-
-// Assembler Instruction implementations.
-
-void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
- EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
- emit(opcode);
- emit_operand(reg, op);
-}
-
-
-void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
- EnsureSpace ensure_space(this);
- ASSERT((opcode & 0xC6) == 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
- // Swap reg and rm_reg and change opcode operand order.
- emit_rex_64(rm_reg, reg);
- emit(opcode ^ 0x02);
- emit_modrm(rm_reg, reg);
- } else {
- emit_rex_64(reg, rm_reg);
- emit(opcode);
- emit_modrm(reg, rm_reg);
- }
-}
-
-
-void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
- EnsureSpace ensure_space(this);
- ASSERT((opcode & 0xC6) == 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
- // Swap reg and rm_reg and change opcode operand order.
- emit(0x66);
- emit_optional_rex_32(rm_reg, reg);
- emit(opcode ^ 0x02);
- emit_modrm(rm_reg, reg);
- } else {
- emit(0x66);
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_modrm(reg, rm_reg);
- }
-}
-
-
-void Assembler::arithmetic_op_16(byte opcode,
- Register reg,
- const Operand& rm_reg) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_operand(reg, rm_reg);
-}
-
-
-void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
- EnsureSpace ensure_space(this);
- ASSERT((opcode & 0xC6) == 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
- // Swap reg and rm_reg and change opcode operand order.
- emit_optional_rex_32(rm_reg, reg);
- emit(opcode ^ 0x02); // E.g. 0x03 -> 0x01 for ADD.
- emit_modrm(rm_reg, reg);
- } else {
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_modrm(reg, rm_reg);
- }
-}
-
-
-void Assembler::arithmetic_op_32(byte opcode,
- Register reg,
- const Operand& rm_reg) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(reg, rm_reg);
- emit(opcode);
- emit_operand(reg, rm_reg);
-}
-
-
-void Assembler::immediate_arithmetic_op(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_modrm(subcode, dst);
- emit(src.value_);
- } else if (dst.is(rax)) {
- emit(0x05 | (subcode << 3));
- emitl(src.value_);
- } else {
- emit(0x81);
- emit_modrm(subcode, dst);
- emitl(src.value_);
- }
-}
-
-void Assembler::immediate_arithmetic_op(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_operand(subcode, dst);
- emit(src.value_);
- } else {
- emit(0x81);
- emit_operand(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_16(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit(0x66); // Operand size override prefix.
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_modrm(subcode, dst);
- emit(src.value_);
- } else if (dst.is(rax)) {
- emit(0x05 | (subcode << 3));
- emitw(src.value_);
- } else {
- emit(0x81);
- emit_modrm(subcode, dst);
- emitw(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_16(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit(0x66); // Operand size override prefix.
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_operand(subcode, dst);
- emit(src.value_);
- } else {
- emit(0x81);
- emit_operand(subcode, dst);
- emitw(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_modrm(subcode, dst);
- emit(src.value_);
- } else if (dst.is(rax)) {
- emit(0x05 | (subcode << 3));
- emitl(src.value_);
- } else {
- emit(0x81);
- emit_modrm(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- if (is_int8(src.value_)) {
- emit(0x83);
- emit_operand(subcode, dst);
- emit(src.value_);
- } else {
- emit(0x81);
- emit_operand(subcode, dst);
- emitl(src.value_);
- }
-}
-
-
-void Assembler::immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- ASSERT(is_int8(src.value_) || is_uint8(src.value_));
- emit(0x80);
- emit_operand(subcode, dst);
- emit(src.value_);
-}
-
-
-void Assembler::immediate_arithmetic_op_8(byte subcode,
- Register dst,
- Immediate src) {
- EnsureSpace ensure_space(this);
- if (!dst.is_byte_register()) {
- // Use 64-bit mode byte registers.
- emit_rex_64(dst);
- }
- ASSERT(is_int8(src.value_) || is_uint8(src.value_));
- emit(0x80);
- emit_modrm(subcode, dst);
- emit(src.value_);
-}
-
-
-void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
- if (shift_amount.value_ == 1) {
- emit_rex_64(dst);
- emit(0xD1);
- emit_modrm(subcode, dst);
- } else {
- emit_rex_64(dst);
- emit(0xC1);
- emit_modrm(subcode, dst);
- emit(shift_amount.value_);
- }
-}
-
-
-void Assembler::shift(Register dst, int subcode) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xD3);
- emit_modrm(subcode, dst);
-}
-
-
-void Assembler::shift_32(Register dst, int subcode) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xD3);
- emit_modrm(subcode, dst);
-}
-
-
-void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(shift_amount.value_)); // illegal shift count
- if (shift_amount.value_ == 1) {
- emit_optional_rex_32(dst);
- emit(0xD1);
- emit_modrm(subcode, dst);
- } else {
- emit_optional_rex_32(dst);
- emit(0xC1);
- emit_modrm(subcode, dst);
- emit(shift_amount.value_);
- }
-}
-
-
-void Assembler::bt(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0xA3);
- emit_operand(src, dst);
-}
-
-
-void Assembler::bts(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0xAB);
- emit_operand(src, dst);
-}
-
-
-void Assembler::call(Label* L) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- // 1110 1000 #32-bit disp.
- emit(0xE8);
- if (L->is_bound()) {
- int offset = L->pos() - pc_offset() - sizeof(int32_t);
- ASSERT(offset <= 0);
- emitl(offset);
- } else if (L->is_linked()) {
- emitl(L->pos());
- L->link_to(pc_offset() - sizeof(int32_t));
- } else {
- ASSERT(L->is_unused());
- int32_t current = pc_offset();
- emitl(current);
- L->link_to(current);
- }
-}
-
-
-void Assembler::call(Handle<Code> target,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- // 1110 1000 #32-bit disp.
- emit(0xE8);
- emit_code_target(target, rmode, ast_id);
-}
-
-
-void Assembler::call(Register adr) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- // Opcode: FF /2 r64.
- emit_optional_rex_32(adr);
- emit(0xFF);
- emit_modrm(0x2, adr);
-}
-
-
-void Assembler::call(const Operand& op) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- // Opcode: FF /2 m64.
- emit_optional_rex_32(op);
- emit(0xFF);
- emit_operand(0x2, op);
-}
-
-
-// Calls directly to the given address using a relative offset.
-// Should only ever be used in Code objects for calls within the
-// same Code object. Should not be used when generating new code (use labels),
-// but only when patching existing code.
-void Assembler::call(Address target) {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- // 1110 1000 #32-bit disp.
- emit(0xE8);
- Address source = pc_ + 4;
- intptr_t displacement = target - source;
- ASSERT(is_int32(displacement));
- emitl(static_cast<int32_t>(displacement));
-}
-
-
-void Assembler::clc() {
- EnsureSpace ensure_space(this);
- emit(0xF8);
-}
-
-void Assembler::cld() {
- EnsureSpace ensure_space(this);
- emit(0xFC);
-}
-
-void Assembler::cdq() {
- EnsureSpace ensure_space(this);
- emit(0x99);
-}
-
-
-void Assembler::cmovq(Condition cc, Register dst, Register src) {
- if (cc == always) {
- movq(dst, src);
- } else if (cc == never) {
- return;
- }
- // No need to check CpuInfo for CMOV support, it's a required part of the
- // 64-bit architecture.
- ASSERT(cc >= 0); // Use mov for unconditional moves.
- EnsureSpace ensure_space(this);
- // Opcode: REX.W 0f 40 + cc /r.
- emit_rex_64(dst, src);
- emit(0x0f);
- emit(0x40 + cc);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
- if (cc == always) {
- movq(dst, src);
- } else if (cc == never) {
- return;
- }
- ASSERT(cc >= 0);
- EnsureSpace ensure_space(this);
- // Opcode: REX.W 0f 40 + cc /r.
- emit_rex_64(dst, src);
- emit(0x0f);
- emit(0x40 + cc);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cmovl(Condition cc, Register dst, Register src) {
- if (cc == always) {
- movl(dst, src);
- } else if (cc == never) {
- return;
- }
- ASSERT(cc >= 0);
- EnsureSpace ensure_space(this);
- // Opcode: 0f 40 + cc /r.
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x40 + cc);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
- if (cc == always) {
- movl(dst, src);
- } else if (cc == never) {
- return;
- }
- ASSERT(cc >= 0);
- EnsureSpace ensure_space(this);
- // Opcode: 0f 40 + cc /r.
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x40 + cc);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cmpb_al(Immediate imm8) {
- ASSERT(is_int8(imm8.value_) || is_uint8(imm8.value_));
- EnsureSpace ensure_space(this);
- emit(0x3c);
- emit(imm8.value_);
-}
-
-
-void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CPUID));
- EnsureSpace ensure_space(this);
- emit(0x0F);
- emit(0xA2);
-}
-
-
-void Assembler::cqo() {
- EnsureSpace ensure_space(this);
- emit_rex_64();
- emit(0x99);
-}
-
-
-void Assembler::decq(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decq(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(1, dst);
-}
-
-
-void Assembler::decl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_operand(1, dst);
-}
-
-
-void Assembler::decb(Register dst) {
- EnsureSpace ensure_space(this);
- if (!dst.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(dst);
- }
- emit(0xFE);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decb(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFE);
- emit_operand(1, dst);
-}
-
-
-void Assembler::enter(Immediate size) {
- EnsureSpace ensure_space(this);
- emit(0xC8);
- emitw(size.value_); // 16 bit operand, always.
- emit(0);
-}
-
-
-void Assembler::hlt() {
- EnsureSpace ensure_space(this);
- emit(0xF4);
-}
-
-
-void Assembler::idivq(Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x7, src);
-}
-
-
-void Assembler::idivl(Register src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(src);
- emit(0xF7);
- emit_modrm(0x7, src);
-}
-
-
-void Assembler::imul(Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x5, src);
-}
-
-
-void Assembler::imul(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::imul(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imul(Register dst, Register src, Immediate imm) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- if (is_int8(imm.value_)) {
- emit(0x6B);
- emit_modrm(dst, src);
- emit(imm.value_);
- } else {
- emit(0x69);
- emit_modrm(dst, src);
- emitl(imm.value_);
- }
-}
-
-
-void Assembler::imull(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::imull(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imull(Register dst, Register src, Immediate imm) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- if (is_int8(imm.value_)) {
- emit(0x6B);
- emit_modrm(dst, src);
- emit(imm.value_);
- } else {
- emit(0x69);
- emit_modrm(dst, src);
- emitl(imm.value_);
- }
-}
-
-
-void Assembler::incq(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_modrm(0x0, dst);
-}
-
-
-void Assembler::incq(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(0, dst);
-}
-
-
-void Assembler::incl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_operand(0, dst);
-}
-
-
-void Assembler::incl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_modrm(0, dst);
-}
-
-
-void Assembler::int3() {
- EnsureSpace ensure_space(this);
- emit(0xCC);
-}
-
-
-void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
- if (cc == always) {
- jmp(L);
- return;
- } else if (cc == never) {
- return;
- }
- EnsureSpace ensure_space(this);
- ASSERT(is_uint4(cc));
- if (L->is_bound()) {
- const int short_size = 2;
- const int long_size = 6;
- int offs = L->pos() - pc_offset();
- ASSERT(offs <= 0);
- // Determine whether we can use 1-byte offsets for backwards branches,
- // which have a max range of 128 bytes.
-
- // We also need to check predictable_code_size() flag here, because on x64,
- // when the full code generator recompiles code for debugging, some places
- // need to be padded out to a certain size. The debugger is keeping track of
- // how often it did this so that it can adjust return addresses on the
- // stack, but if the size of jump instructions can also change, that's not
- // enough and the calculated offsets would be incorrect.
- if (is_int8(offs - short_size) && !predictable_code_size()) {
- // 0111 tttn #8-bit disp.
- emit(0x70 | cc);
- emit((offs - short_size) & 0xFF);
- } else {
- // 0000 1111 1000 tttn #32-bit disp.
- emit(0x0F);
- emit(0x80 | cc);
- emitl(offs - long_size);
- }
- } else if (distance == Label::kNear) {
- // 0111 tttn #8-bit disp
- emit(0x70 | cc);
- byte disp = 0x00;
- if (L->is_near_linked()) {
- int offset = L->near_link_pos() - pc_offset();
- ASSERT(is_int8(offset));
- disp = static_cast<byte>(offset & 0xFF);
- }
- L->link_to(pc_offset(), Label::kNear);
- emit(disp);
- } else if (L->is_linked()) {
- // 0000 1111 1000 tttn #32-bit disp.
- emit(0x0F);
- emit(0x80 | cc);
- emitl(L->pos());
- L->link_to(pc_offset() - sizeof(int32_t));
- } else {
- ASSERT(L->is_unused());
- emit(0x0F);
- emit(0x80 | cc);
- int32_t current = pc_offset();
- emitl(current);
- L->link_to(current);
- }
-}
-
-
-void Assembler::j(Condition cc,
- Handle<Code> target,
- RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint4(cc));
- // 0000 1111 1000 tttn #32-bit disp.
- emit(0x0F);
- emit(0x80 | cc);
- emit_code_target(target, rmode);
-}
-
-
-void Assembler::jmp(Label* L, Label::Distance distance) {
- EnsureSpace ensure_space(this);
- const int short_size = sizeof(int8_t);
- const int long_size = sizeof(int32_t);
- if (L->is_bound()) {
- int offs = L->pos() - pc_offset() - 1;
- ASSERT(offs <= 0);
- if (is_int8(offs - short_size) && !predictable_code_size()) {
- // 1110 1011 #8-bit disp.
- emit(0xEB);
- emit((offs - short_size) & 0xFF);
- } else {
- // 1110 1001 #32-bit disp.
- emit(0xE9);
- emitl(offs - long_size);
- }
- } else if (distance == Label::kNear) {
- emit(0xEB);
- byte disp = 0x00;
- if (L->is_near_linked()) {
- int offset = L->near_link_pos() - pc_offset();
- ASSERT(is_int8(offset));
- disp = static_cast<byte>(offset & 0xFF);
- }
- L->link_to(pc_offset(), Label::kNear);
- emit(disp);
- } else if (L->is_linked()) {
- // 1110 1001 #32-bit disp.
- emit(0xE9);
- emitl(L->pos());
- L->link_to(pc_offset() - long_size);
- } else {
- // 1110 1001 #32-bit disp.
- ASSERT(L->is_unused());
- emit(0xE9);
- int32_t current = pc_offset();
- emitl(current);
- L->link_to(current);
- }
-}
-
-
-void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
- EnsureSpace ensure_space(this);
- // 1110 1001 #32-bit disp.
- emit(0xE9);
- emit_code_target(target, rmode);
-}
-
-
-void Assembler::jmp(Register target) {
- EnsureSpace ensure_space(this);
- // Opcode FF/4 r64.
- emit_optional_rex_32(target);
- emit(0xFF);
- emit_modrm(0x4, target);
-}
-
-
-void Assembler::jmp(const Operand& src) {
- EnsureSpace ensure_space(this);
- // Opcode FF/4 m64.
- emit_optional_rex_32(src);
- emit(0xFF);
- emit_operand(0x4, src);
-}
-
-
-void Assembler::lea(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::leal(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
- EnsureSpace ensure_space(this);
- emit(0x48); // REX.W
- emit(0xA1);
- emitq(reinterpret_cast<uintptr_t>(value), mode);
-}
-
-
-void Assembler::load_rax(ExternalReference ref) {
- load_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
-}
-
-
-void Assembler::leave() {
- EnsureSpace ensure_space(this);
- emit(0xC9);
-}
-
-
-void Assembler::movb(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- if (!dst.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(dst, src);
- } else {
- emit_optional_rex_32(dst, src);
- }
- emit(0x8A);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movb(Register dst, Immediate imm) {
- EnsureSpace ensure_space(this);
- if (!dst.is_byte_register()) {
- emit_rex_32(dst);
- }
- emit(0xB0 + dst.low_bits());
- emit(imm.value_);
-}
-
-
-void Assembler::movb(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- if (!src.is_byte_register()) {
- emit_rex_32(src, dst);
- } else {
- emit_optional_rex_32(src, dst);
- }
- emit(0x88);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movw(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_modrm(src, dst);
- } else {
- emit_optional_rex_32(dst, src);
- emit(0x8B);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::movl(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(src, dst);
- emit(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movl(const Operand& dst, Immediate value) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xC7);
- emit_operand(0x0, dst);
- emit(value);
-}
-
-
-void Assembler::movl(Register dst, Immediate value) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xB8 + dst.low_bits());
- emit(value);
-}
-
-
-void Assembler::movq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
- emit(0x89);
- emit_modrm(src, dst);
- } else {
- emit_rex_64(dst, src);
- emit(0x8B);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::movq(Register dst, Immediate value) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xC7);
- emit_modrm(0x0, dst);
- emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
-}
-
-
-void Assembler::movq(const Operand& dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src, dst);
- emit(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
- // This method must not be used with heap object references. The stored
- // address is not GC safe. Use the handle version instead.
- ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitq(reinterpret_cast<uintptr_t>(value), rmode);
-}
-
-
-void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
- // Non-relocatable values might not need a 64-bit representation.
- if (RelocInfo::IsNone(rmode)) {
- if (is_uint32(value)) {
- movl(dst, Immediate(static_cast<int32_t>(value)));
- return;
- } else if (is_int32(value)) {
- movq(dst, Immediate(static_cast<int32_t>(value)));
- return;
- }
- // Value cannot be represented by 32 bits, so do a full 64 bit immediate
- // value.
- }
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitq(value, rmode);
-}
-
-
-void Assembler::movq(Register dst, ExternalReference ref) {
- int64_t value = reinterpret_cast<int64_t>(ref.address());
- movq(dst, value, RelocInfo::EXTERNAL_REFERENCE);
-}
-
-
-void Assembler::movq(const Operand& dst, Immediate value) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xC7);
- emit_operand(0, dst);
- emit(value);
-}
-
-
-// Loads the ip-relative location of the src label into the target location
-// (as a 32-bit offset sign extended to 64-bit).
-void Assembler::movl(const Operand& dst, Label* src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xC7);
- emit_operand(0, dst);
- if (src->is_bound()) {
- int offset = src->pos() - pc_offset() - sizeof(int32_t);
- ASSERT(offset <= 0);
- emitl(offset);
- } else if (src->is_linked()) {
- emitl(src->pos());
- src->link_to(pc_offset() - sizeof(int32_t));
- } else {
- ASSERT(src->is_unused());
- int32_t current = pc_offset();
- emitl(current);
- src->link_to(current);
- }
-}
-
-
-void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
- // If there is no relocation info, emit the value of the handle efficiently
- // (possibly using less than 8 bytes for the value).
- if (RelocInfo::IsNone(mode)) {
- // There is no possible reason to store a heap pointer without relocation
- // info, so it must be a smi.
- ASSERT(value->IsSmi());
- movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE64);
- } else {
- EnsureSpace ensure_space(this);
- ASSERT(value->IsHeapObject());
- ASSERT(!HEAP->InNewSpace(*value));
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
- }
-}
-
-
-void Assembler::movsxbq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xBE);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movsxwq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xBF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movsxlq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x63);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::movsxlq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x63);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxbq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
- // there is no need to make this a 64 bit operation.
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxbl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::repmovsb() {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit(0xA4);
-}
-
-
-void Assembler::repmovsw() {
- EnsureSpace ensure_space(this);
- emit(0x66); // Operand size override.
- emit(0xF3);
- emit(0xA4);
-}
-
-
-void Assembler::repmovsl() {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit(0xA5);
-}
-
-
-void Assembler::repmovsq() {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_rex_64();
- emit(0xA5);
-}
-
-
-void Assembler::mul(Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x4, src);
-}
-
-
-void Assembler::neg(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0x3, dst);
-}
-
-
-void Assembler::negl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xF7);
- emit_modrm(0x3, dst);
-}
-
-
-void Assembler::neg(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_operand(3, dst);
-}
-
-
-void Assembler::nop() {
- EnsureSpace ensure_space(this);
- emit(0x90);
-}
-
-
-void Assembler::not_(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0x2, dst);
-}
-
-
-void Assembler::not_(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_operand(2, dst);
-}
-
-
-void Assembler::notl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xF7);
- emit_modrm(0x2, dst);
-}
-
-
-void Assembler::Nop(int n) {
- // The recommended multi-byte sequences of NOP instructions from the Intel 64
- // and IA-32 Architectures Software Developer's Manual.
- //
- // Length Assembly Byte Sequence
- // 2 bytes 66 NOP 66 90H
- // 3 bytes NOP DWORD ptr [EAX] 0F 1F 00H
- // 4 bytes NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H
- // 5 bytes NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H
- // 6 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H
- // 7 bytes NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H
- // 8 bytes NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H
- // 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00
- // 00000000H] 00H
-
- EnsureSpace ensure_space(this);
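- // The fall-through between the cases below is intentional: n == 2 emits the
- // 0x66 prefix and then the single-byte 0x90 NOP, cases 9 through 11 prepend
- // 0x66 prefixes to the 8-byte NOP, and longer requests loop, emitting
- // full-length chunks until one of the cases can finish the sequence.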
- while (n > 0) {
- switch (n) {
- case 2:
- emit(0x66);
- case 1:
- emit(0x90);
- return;
- case 3:
- emit(0x0f);
- emit(0x1f);
- emit(0x00);
- return;
- case 4:
- emit(0x0f);
- emit(0x1f);
- emit(0x40);
- emit(0x00);
- return;
- case 6:
- emit(0x66);
- case 5:
- emit(0x0f);
- emit(0x1f);
- emit(0x44);
- emit(0x00);
- emit(0x00);
- return;
- case 7:
- emit(0x0f);
- emit(0x1f);
- emit(0x80);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- default:
- case 11:
- emit(0x66);
- n--;
- case 10:
- emit(0x66);
- n--;
- case 9:
- emit(0x66);
- n--;
- case 8:
- emit(0x0f);
- emit(0x1f);
- emit(0x84);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- n -= 8;
- }
- }
-}
-
-
-void Assembler::pop(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0x58 | dst.low_bits());
-}
-
-
-void Assembler::pop(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0x8F);
- emit_operand(0, dst);
-}
-
-
-void Assembler::popfq() {
- EnsureSpace ensure_space(this);
- emit(0x9D);
-}
-
-
-void Assembler::push(Register src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(src);
- emit(0x50 | src.low_bits());
-}
-
-
-void Assembler::push(const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(src);
- emit(0xFF);
- emit_operand(6, src);
-}
-
-
-void Assembler::push(Immediate value) {
- EnsureSpace ensure_space(this);
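- // 6A ib pushes a sign-extended 8-bit immediate and 68 id a sign-extended
- // 32-bit immediate; the short form is used whenever the value fits in a
- // signed byte.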
- if (is_int8(value.value_)) {
- emit(0x6A);
- emit(value.value_); // Emit low byte of value.
- } else {
- emit(0x68);
- emitl(value.value_);
- }
-}
-
-
-void Assembler::push_imm32(int32_t imm32) {
- EnsureSpace ensure_space(this);
- emit(0x68);
- emitl(imm32);
-}
-
-
-void Assembler::pushfq() {
- EnsureSpace ensure_space(this);
- emit(0x9C);
-}
-
-
-void Assembler::rdtsc() {
- EnsureSpace ensure_space(this);
- emit(0x0F);
- emit(0x31);
-}
-
-
-void Assembler::ret(int imm16) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint16(imm16));
- if (imm16 == 0) {
- emit(0xC3);
- } else {
- emit(0xC2);
- emit(imm16 & 0xFF);
- emit((imm16 >> 8) & 0xFF);
- }
-}
-
-
-void Assembler::setcc(Condition cc, Register reg) {
- if (cc > last_condition) {
- movb(reg, Immediate(cc == always ? 1 : 0));
- return;
- }
- EnsureSpace ensure_space(this);
- ASSERT(is_uint4(cc));
- if (!reg.is_byte_register()) { // Use x64 byte registers, where different.
- emit_rex_32(reg);
- }
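- // Encoding is 0F 90+cc with a /0 ModR/M byte; e.g. cc == equal (4) with
- // reg == rax emits 0F 94 C0 (sete al).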
- emit(0x0F);
- emit(0x90 | cc);
- emit_modrm(0x0, reg);
-}
-
-
-void Assembler::shld(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0xA5);
- emit_modrm(src, dst);
-}
-
-
-void Assembler::shrd(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0xAD);
- emit_modrm(src, dst);
-}
-
-
-void Assembler::xchg(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
- Register other = src.is(rax) ? dst : src;
- emit_rex_64(other);
- emit(0x90 | other.low_bits());
- } else if (dst.low_bits() == 4) {
- emit_rex_64(dst, src);
- emit(0x87);
- emit_modrm(dst, src);
- } else {
- emit_rex_64(src, dst);
- emit(0x87);
- emit_modrm(src, dst);
- }
-}
-
-
-void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
- EnsureSpace ensure_space(this);
- emit(0x48); // REX.W
- emit(0xA3);
- emitq(reinterpret_cast<uintptr_t>(dst), mode);
-}
-
-
-void Assembler::store_rax(ExternalReference ref) {
- store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
-}
-
-
-void Assembler::testb(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_32(src, dst);
- emit(0x84);
- emit_modrm(src, dst);
- } else {
- if (!dst.is_byte_register() || !src.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(dst, src);
- }
- emit(0x84);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testb(Register reg, Immediate mask) {
- ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
- EnsureSpace ensure_space(this);
- if (reg.is(rax)) {
- emit(0xA8);
- emit(mask.value_); // Low byte emitted.
- } else {
- if (!reg.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(reg);
- }
- emit(0xF6);
- emit_modrm(0x0, reg);
- emit(mask.value_); // Low byte emitted.
- }
-}
-
-
-void Assembler::testb(const Operand& op, Immediate mask) {
- ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(rax, op);
- emit(0xF6);
- emit_operand(rax, op); // Operation code 0
- emit(mask.value_); // Low byte emitted.
-}
-
-
-void Assembler::testb(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- if (!reg.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(reg, op);
- } else {
- emit_optional_rex_32(reg, op);
- }
- emit(0x84);
- emit_operand(reg, op);
-}
-
-
-void Assembler::testl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
- emit(0x85);
- emit_modrm(src, dst);
- } else {
- emit_optional_rex_32(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testl(Register reg, Immediate mask) {
- // testl with a mask that fits in the low byte is exactly testb.
- if (is_uint8(mask.value_)) {
- testb(reg, mask);
- return;
- }
- EnsureSpace ensure_space(this);
- if (reg.is(rax)) {
- emit(0xA9);
- emit(mask);
- } else {
- emit_optional_rex_32(rax, reg);
- emit(0xF7);
- emit_modrm(0x0, reg);
- emit(mask);
- }
-}
-
-
-void Assembler::testl(const Operand& op, Immediate mask) {
- // testl with a mask that fits in the low byte is exactly testb.
- if (is_uint8(mask.value_)) {
- testb(op, mask);
- return;
- }
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(rax, op);
- emit(0xF7);
- emit_operand(rax, op); // Operation code 0
- emit(mask);
-}
-
-
-void Assembler::testq(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
- emit(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::testq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
- emit(0x85);
- emit_modrm(src, dst);
- } else {
- emit_rex_64(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testq(Register dst, Immediate mask) {
- EnsureSpace ensure_space(this);
- if (dst.is(rax)) {
- emit_rex_64();
- emit(0xA9);
- emit(mask);
- } else {
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0, dst);
- emit(mask);
- }
-}
-
-
-// FPU instructions.
-
-
-void Assembler::fld(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD9, 0xC0, i);
-}
-
-
-void Assembler::fld1() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xE8);
-}
-
-
-void Assembler::fldz() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xEE);
-}
-
-
-void Assembler::fldpi() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xEB);
-}
-
-
-void Assembler::fldln2() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xED);
-}
-
-
-void Assembler::fld_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xD9);
- emit_operand(0, adr);
-}
-
-
-void Assembler::fld_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDD);
- emit_operand(0, adr);
-}
-
-
-void Assembler::fstp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xD9);
- emit_operand(3, adr);
-}
-
-
-void Assembler::fstp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDD);
- emit_operand(3, adr);
-}
-
-
-void Assembler::fstp(int index) {
- ASSERT(is_uint3(index));
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xD8, index);
-}
-
-
-void Assembler::fild_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDB);
- emit_operand(0, adr);
-}
-
-
-void Assembler::fild_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDF);
- emit_operand(5, adr);
-}
-
-
-void Assembler::fistp_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDB);
- emit_operand(3, adr);
-}
-
-
-void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDB);
- emit_operand(1, adr);
-}
-
-
-void Assembler::fisttp_d(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(SSE3));
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDD);
- emit_operand(1, adr);
-}
-
-
-void Assembler::fist_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDB);
- emit_operand(2, adr);
-}
-
-
-void Assembler::fistp_d(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDF);
- emit_operand(7, adr);
-}
-
-
-void Assembler::fabs() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xE1);
-}
-
-
-void Assembler::fchs() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xE0);
-}
-
-
-void Assembler::fcos() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xFF);
-}
-
-
-void Assembler::fsin() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xFE);
-}
-
-
-void Assembler::fptan() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xF2);
-}
-
-
-void Assembler::fyl2x() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xF1);
-}
-
-
-void Assembler::f2xm1() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xF0);
-}
-
-
-void Assembler::fscale() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xFD);
-}
-
-
-void Assembler::fninit() {
- EnsureSpace ensure_space(this);
- emit(0xDB);
- emit(0xE3);
-}
-
-
-void Assembler::fadd(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xC0, i);
-}
-
-
-void Assembler::fsub(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xE8, i);
-}
-
-
-void Assembler::fisub_s(const Operand& adr) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(adr);
- emit(0xDA);
- emit_operand(4, adr);
-}
-
-
-void Assembler::fmul(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xC8, i);
-}
-
-
-void Assembler::fdiv(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDC, 0xF8, i);
-}
-
-
-void Assembler::faddp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xC0, i);
-}
-
-
-void Assembler::fsubp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xE8, i);
-}
-
-
-void Assembler::fsubrp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xE0, i);
-}
-
-
-void Assembler::fmulp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xC8, i);
-}
-
-
-void Assembler::fdivp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDE, 0xF8, i);
-}
-
-
-void Assembler::fprem() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xF8);
-}
-
-
-void Assembler::fprem1() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xF5);
-}
-
-
-void Assembler::fxch(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xD9, 0xC8, i);
-}
-
-
-void Assembler::fincstp() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xF7);
-}
-
-
-void Assembler::ffree(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xC0, i);
-}
-
-
-void Assembler::ftst() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xE4);
-}
-
-
-void Assembler::fucomp(int i) {
- EnsureSpace ensure_space(this);
- emit_farith(0xDD, 0xE8, i);
-}
-
-
-void Assembler::fucompp() {
- EnsureSpace ensure_space(this);
- emit(0xDA);
- emit(0xE9);
-}
-
-
-void Assembler::fucomi(int i) {
- EnsureSpace ensure_space(this);
- emit(0xDB);
- emit(0xE8 + i);
-}
-
-
-void Assembler::fucomip() {
- EnsureSpace ensure_space(this);
- emit(0xDF);
- emit(0xE9);
-}
-
-
-void Assembler::fcompp() {
- EnsureSpace ensure_space(this);
- emit(0xDE);
- emit(0xD9);
-}
-
-
-void Assembler::fnstsw_ax() {
- EnsureSpace ensure_space(this);
- emit(0xDF);
- emit(0xE0);
-}
-
-
-void Assembler::fwait() {
- EnsureSpace ensure_space(this);
- emit(0x9B);
-}
-
-
-void Assembler::frndint() {
- EnsureSpace ensure_space(this);
- emit(0xD9);
- emit(0xFC);
-}
-
-
-void Assembler::fnclex() {
- EnsureSpace ensure_space(this);
- emit(0xDB);
- emit(0xE2);
-}
-
-
-void Assembler::sahf() {
- // TODO(X64): Test for presence. Not all 64-bit Intel CPUs have sahf
- // in 64-bit mode. Test CPUID.
- EnsureSpace ensure_space(this);
- emit(0x9E);
-}
-
-
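-// Helper for the FPU stack-register instructions above: emits the two bytes
-// "b1, b2 + i", where i selects ST(i). For example, fadd(2) emits DC C2,
-// i.e. fadd st(2), st(0).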
-void Assembler::emit_farith(int b1, int b2, int i) {
- ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode
- ASSERT(is_uint3(i)); // illegal stack offset
- emit(b1);
- emit(b2 + i);
-}
-
-// SSE 2 operations.
-
-void Assembler::movd(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x6E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movd(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(src, dst);
- emit(0x0F);
- emit(0x7E);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movq(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x6E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movq(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0x7E);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movq(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- if (dst.low_bits() == 4) {
- // Avoid unnecessary SIB byte.
- emit(0xf3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x7e);
- emit_sse_operand(dst, src);
- } else {
- emit(0x66);
- emit_optional_rex_32(src, dst);
- emit(0x0F);
- emit(0xD6);
- emit_sse_operand(src, dst);
- }
-}
-
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_rex_64(src, dst);
- emit(0x0F);
- emit(0x7F);
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movdqa(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x6F);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(CpuFeatures::IsSupported(SSE4_1));
- ASSERT(is_uint8(imm8));
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x3A);
- emit(0x17);
- emit_sse_operand(dst, src);
- emit(imm8);
-}
-
-
-void Assembler::movsd(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2); // double
- emit_optional_rex_32(src, dst);
- emit(0x0F);
- emit(0x11); // store
- emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2); // double
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movsd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF2); // double
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movaps(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- // Try to avoid an unnecessary SIB byte.
- emit_optional_rex_32(src, dst);
- emit(0x0F);
- emit(0x29);
- emit_sse_operand(src, dst);
- } else {
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x28);
- emit_sse_operand(dst, src);
- }
-}
-
-
-void Assembler::movapd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- // Try to avoid an unnecessary SIB byte.
- emit(0x66);
- emit_optional_rex_32(src, dst);
- emit(0x0F);
- emit(0x29);
- emit_sse_operand(src, dst);
- } else {
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x28);
- emit_sse_operand(dst, src);
- }
-}
-
-
-void Assembler::movss(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF3); // single
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x10); // load
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movss(const Operand& src, XMMRegister dst) {
- EnsureSpace ensure_space(this);
- emit(0xF3); // single
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x11); // store
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvttss2si(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cvttss2si(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvttsd2si(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_operand(dst, src);
-}
-
-
-void Assembler::cvttsd2si(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x2C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x2A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2si(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x2D);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0x2D);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x58);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::addsd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x58);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x59);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::mulsd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x59);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5C);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5E);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::andpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x54);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::orpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x56);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x57);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::xorps(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x57);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x51);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2e);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::roundsd(XMMRegister dst, XMMRegister src,
- Assembler::RoundingMode mode) {
- ASSERT(CpuFeatures::IsEnabled(SSE4_1));
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x3a);
- emit(0x0b);
- emit_sse_operand(dst, src);
- // Mask precision exception.
- emit(static_cast<byte>(mode) | 0x8);
-}
-
-
-void Assembler::movmskpd(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x50);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::movmskps(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x50);
- emit_sse_operand(dst, src);
-}
-
-
-void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
- Register ireg = { reg.code() };
- emit_operand(ireg, adr);
-}
-
-
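-// The register-register forms below emit a ModR/M byte in register-direct
-// mode: the top two bits are 11 (0xC0), bits 5..3 hold the low three bits of
-// the first (reg) operand and bits 2..0 those of the second (r/m) operand.
-// The fourth bit of each register code travels in the REX.R and REX.B bits
-// emitted earlier. For example, dst = xmm1, src = xmm2 gives
-// 0xC0 | (1 << 3) | 2 == 0xCA.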
-void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
- emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
-}
-
-void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
- emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
-}
-
-void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
- emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
-}
-
-
-void Assembler::db(uint8_t data) {
- EnsureSpace ensure_space(this);
- emit(data);
-}
-
-
-void Assembler::dd(uint32_t data) {
- EnsureSpace ensure_space(this);
- emitl(data);
-}
-
-
-// Relocation information implementations.
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- ASSERT(!RelocInfo::IsNone(rmode));
- // Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
- if (!Serializer::enabled()) {
- Serializer::TooLateToEnableNow();
- }
-#endif
- if (!Serializer::enabled() && !emit_debug_code()) {
- return;
- }
- }
- RelocInfo rinfo(pc_, rmode, data, NULL);
- reloc_info_writer.Write(&rinfo);
-}
-
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
-}
-
-
-void Assembler::RecordComment(const char* msg, bool force) {
- if (FLAG_code_comments || force) {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
-const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::CODE_AGE_SEQUENCE;
-
-
-bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on x64 means that it is a relative 32 bit address, as used
- // by branch instructions.
- return (1 << rmode_) & kApplyMask;
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/assembler-x64.h b/src/3rdparty/v8/src/x64/assembler-x64.h
deleted file mode 100644
index 69eeb8e..0000000
--- a/src/3rdparty/v8/src/x64/assembler-x64.h
+++ /dev/null
@@ -1,1678 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-// A lightweight X64 Assembler.
-
-#ifndef V8_X64_ASSEMBLER_X64_H_
-#define V8_X64_ASSEMBLER_X64_H_
-
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// Utility functions
-
-// Test whether a 64-bit value is in a specific range.
-inline bool is_uint32(int64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return static_cast<uint64_t>(x) <= kMaxUInt32;
-}
-
-inline bool is_int32(int64_t x) {
- static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
- return is_uint32(x - kMinInt32);
-}
-
-inline bool uint_is_int32(uint64_t x) {
- static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
- return x <= kMaxInt32;
-}
-
-inline bool is_uint32(uint64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return x <= kMaxUInt32;
-}
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-
-struct Register {
- // The non-allocatable registers are:
- // rsp - stack pointer
- // rbp - frame pointer
- // rsi - context register
- // r10 - fixed scratch register
- // r12 - smi constant register
- // r13 - root register
- static const int kMaxNumAllocatableRegisters = 10;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
- static const int kNumRegisters = 16;
-
- static int ToAllocationIndex(Register reg) {
- return kAllocationIndexByRegisterCode[reg.code()];
- }
-
- static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- Register result = { kRegisterCodeByAllocationIndex[index] };
- return result;
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "rax",
- "rbx",
- "rdx",
- "rcx",
- "rdi",
- "r8",
- "r9",
- "r11",
- "r14",
- "r15"
- };
- return names[index];
- }
-
- static Register from_code(int code) {
- Register r = { code };
- return r;
- }
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(Register reg) const { return code_ == reg.code_; }
- // rax, rbx, rcx and rdx are byte registers, the rest are not.
- bool is_byte_register() const { return code_ <= 3; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
- int bit() const {
- return 1 << code_;
- }
-
- // Return the high bit of the register code as a 0 or 1. Used often
- // when constructing the REX prefix byte.
- int high_bit() const {
- return code_ >> 3;
- }
- // Return the 3 low bits of the register code. Used when encoding registers
- // in modR/M, SIB, and opcode bytes.
- int low_bits() const {
- return code_ & 0x7;
- }
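- // For example, r11 has code 11 (binary 1011): high_bit() is 1 and is placed
- // in the REX prefix, while low_bits() is 3 and goes into the ModR/M, SIB or
- // opcode byte.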
-
- // Unfortunately we can't make this private in a struct when initializing
- // by assignment.
- int code_;
-
- private:
- static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters];
- static const int kAllocationIndexByRegisterCode[kNumRegisters];
-};
-
-const int kRegister_rax_Code = 0;
-const int kRegister_rcx_Code = 1;
-const int kRegister_rdx_Code = 2;
-const int kRegister_rbx_Code = 3;
-const int kRegister_rsp_Code = 4;
-const int kRegister_rbp_Code = 5;
-const int kRegister_rsi_Code = 6;
-const int kRegister_rdi_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_r11_Code = 11;
-const int kRegister_r12_Code = 12;
-const int kRegister_r13_Code = 13;
-const int kRegister_r14_Code = 14;
-const int kRegister_r15_Code = 15;
-const int kRegister_no_reg_Code = -1;
-
-const Register rax = { kRegister_rax_Code };
-const Register rcx = { kRegister_rcx_Code };
-const Register rdx = { kRegister_rdx_Code };
-const Register rbx = { kRegister_rbx_Code };
-const Register rsp = { kRegister_rsp_Code };
-const Register rbp = { kRegister_rbp_Code };
-const Register rsi = { kRegister_rsi_Code };
-const Register rdi = { kRegister_rdi_Code };
-const Register r8 = { kRegister_r8_Code };
-const Register r9 = { kRegister_r9_Code };
-const Register r10 = { kRegister_r10_Code };
-const Register r11 = { kRegister_r11_Code };
-const Register r12 = { kRegister_r12_Code };
-const Register r13 = { kRegister_r13_Code };
-const Register r14 = { kRegister_r14_Code };
-const Register r15 = { kRegister_r15_Code };
-const Register no_reg = { kRegister_no_reg_Code };
-
-
-struct XMMRegister {
- static const int kMaxNumRegisters = 16;
- static const int kMaxNumAllocatableRegisters = 15;
- static int NumAllocatableRegisters() {
- return kMaxNumAllocatableRegisters;
- }
-
- static int ToAllocationIndex(XMMRegister reg) {
- ASSERT(reg.code() != 0);
- return reg.code() - 1;
- }
-
- static XMMRegister FromAllocationIndex(int index) {
- ASSERT(0 <= index && index < kMaxNumAllocatableRegisters);
- XMMRegister result = { index + 1 };
- return result;
- }
-
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "xmm1",
- "xmm2",
- "xmm3",
- "xmm4",
- "xmm5",
- "xmm6",
- "xmm7",
- "xmm8",
- "xmm9",
- "xmm10",
- "xmm11",
- "xmm12",
- "xmm13",
- "xmm14",
- "xmm15"
- };
- return names[index];
- }
-
- static XMMRegister from_code(int code) {
- ASSERT(code >= 0);
- ASSERT(code < kMaxNumRegisters);
- XMMRegister r = { code };
- return r;
- }
- bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
- }
-
- // Return the high bit of the register code as a 0 or 1. Used often
- // when constructing the REX prefix byte.
- int high_bit() const {
- return code_ >> 3;
- }
- // Return the 3 low bits of the register code. Used when encoding registers
- // in modR/M, SIB, and opcode bytes.
- int low_bits() const {
- return code_ & 0x7;
- }
-
- int code_;
-};
-
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
-const XMMRegister xmm8 = { 8 };
-const XMMRegister xmm9 = { 9 };
-const XMMRegister xmm10 = { 10 };
-const XMMRegister xmm11 = { 11 };
-const XMMRegister xmm12 = { 12 };
-const XMMRegister xmm13 = { 13 };
-const XMMRegister xmm14 = { 14 };
-const XMMRegister xmm15 = { 15 };
-
-
-typedef XMMRegister DoubleRegister;
-
-
-enum Condition {
- // any value < 0 is considered no_condition
- no_condition = -1,
-
- overflow = 0,
- no_overflow = 1,
- below = 2,
- above_equal = 3,
- equal = 4,
- not_equal = 5,
- below_equal = 6,
- above = 7,
- negative = 8,
- positive = 9,
- parity_even = 10,
- parity_odd = 11,
- less = 12,
- greater_equal = 13,
- less_equal = 14,
- greater = 15,
-
- // Fake conditions that are handled by the
- // opcodes using them.
- always = 16,
- never = 17,
- // aliases
- carry = below,
- not_carry = above_equal,
- zero = equal,
- not_zero = not_equal,
- sign = negative,
- not_sign = positive,
- last_condition = greater
-};
-
-
-// Returns the equivalent of !cc.
-// Negation of the default no_condition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
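-// For example, NegateCondition(equal) == not_equal (4 ^ 1 == 5) and
-// NegateCondition(below) == above_equal (2 ^ 1 == 3); the condition codes
-// are laid out so that each even/odd pair are logical negations of each
-// other.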
-inline Condition NegateCondition(Condition cc) {
- return static_cast<Condition>(cc ^ 1);
-}
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case below:
- return above;
- case above:
- return below;
- case above_equal:
- return below_equal;
- case below_equal:
- return above_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- };
-}
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Immediates
-
-class Immediate BASE_EMBEDDED {
- public:
- explicit Immediate(int32_t value) : value_(value) {}
-
- private:
- int32_t value_;
-
- friend class Assembler;
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-enum ScaleFactor {
- times_1 = 0,
- times_2 = 1,
- times_4 = 2,
- times_8 = 3,
- times_int_size = times_4,
- times_pointer_size = times_8
-};
-
-
-class Operand BASE_EMBEDDED {
- public:
- // [base + disp/r]
- Operand(Register base, int32_t disp);
-
- // [base + index*scale + disp/r]
- Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp);
-
- // [index*scale + disp/r]
- Operand(Register index,
- ScaleFactor scale,
- int32_t disp);
-
- // Offset from existing memory operand.
- // Offset is added to existing displacement as 32-bit signed values and
- // this must not overflow.
- Operand(const Operand& base, int32_t offset);
-
- // Checks whether either base or index register is the given register.
- // Does not check the "reg" part of the Operand.
- bool AddressUsesRegister(Register reg) const;
-
- // Queries related to the size of the generated instruction.
- // Whether the generated instruction will have a REX prefix.
- bool requires_rex() const { return rex_ != 0; }
- // Size of the ModR/M, SIB and displacement parts of the generated
- // instruction.
- int operand_size() const { return len_; }
-
- private:
- byte rex_;
- byte buf_[6];
- // The number of bytes of buf_ in use.
- byte len_;
-
- // Set the ModR/M byte without an encoded 'reg' register. The
- // register is encoded later as part of the emit_operand operation.
- // set_modrm can be called before or after set_sib and set_disp*.
- inline void set_modrm(int mod, Register rm);
-
- // Set the SIB byte if one is needed. Sets the length to 2 rather than 1.
- inline void set_sib(ScaleFactor scale, Register index, Register base);
-
- // Adds operand displacement fields (offsets added to the memory address).
- // Needs to be called after set_sib, not before it.
- inline void set_disp8(int disp);
- inline void set_disp32(int disp);
-
- friend class Assembler;
-};
-
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-// Example:
-// if (CpuFeatures::IsSupported(SSE3)) {
-// CpuFeatures::Scope fscope(SSE3);
-// // Generate SSE3 floating point code.
-// } else {
-// // Generate standard x87 or SSE2 floating point code.
-// }
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == SSE2 && !FLAG_enable_sse2) return false;
- if (f == SSE3 && !FLAG_enable_sse3) return false;
- if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
- if (f == CMOV && !FLAG_enable_cmov) return false;
- if (f == RDTSC && !FLAG_enable_rdtsc) return false;
- if (f == SAHF && !FLAG_enable_sahf) return false;
- return (supported_ & (V8_UINT64_C(1) << f)) != 0;
- }
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- uint64_t enabled = isolate->enabled_cpu_features();
- return (enabled & (V8_UINT64_C(1) << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- uint64_t mask = V8_UINT64_C(1) << f;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = isolate_->enabled_cpu_features();
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- uint64_t old_enabled_;
-#else
-
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- private:
- // Safe defaults include SSE2 and CMOV for X64. These are always available,
- // if anyone checks, but they shouldn't need to check.
- // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
- // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
- static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
-
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static uint64_t supported_;
- static uint64_t found_by_runtime_probing_;
-
- friend class ExternalReference;
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
-class Assembler : public AssemblerBase {
- private:
- // We check before assembling an instruction that there is sufficient
- // space to write an instruction and its relocation information.
- // The relocation writer's position must be kGap bytes above the end of
- // the generated instructions. This leaves enough space for the
- // longest possible x64 instruction, 15 bytes, and the longest possible
- // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
- // (There is a 15 byte limit on x64 instruction length that rules out some
- // otherwise valid instructions.)
- // This allows for a single, fast space check per instruction.
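- // (15 bytes of instruction plus RelocInfoWriter::kMaxLength == 16 bytes of
- // relocation information is 31 bytes, so a 32 byte gap is sufficient.)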
- static const int kGap = 32;
-
- public:
- // Create an assembler. Instructions and relocation information are emitted
- // into a buffer, with the instructions starting from the beginning and the
- // relocation information starting from the end of the buffer. See CodeDesc
- // for a detailed comment on the layout (globals.h).
- //
- // If the provided buffer is NULL, the assembler allocates and grows its own
- // buffer, and buffer_size determines the initial buffer size. The buffer is
- // owned by the assembler and deallocated upon destruction of the assembler.
- //
- // If the provided buffer is not NULL, the assembler uses the provided buffer
- // for code generation and assumes its size to be buffer_size. If the buffer
- // is too small, a fatal error occurs. No deallocation of the buffer is done
- // upon destruction of the assembler.
- Assembler(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~Assembler() { }
-
- // GetCode emits any pending (non-emitted) code and fills the descriptor
- // desc. GetCode() is idempotent; it returns the same result if no other
- // Assembler functions are invoked in between GetCode() calls.
- void GetCode(CodeDesc* desc);
-
- // Read/Modify the code target in the relative branch/call instruction at pc.
- // On the x64 architecture, we use relative jumps with a 32-bit displacement
- // to jump to other Code objects in the Code space in the heap.
- // Jumps to C functions are done indirectly through a 64-bit register holding
- // the absolute address of the target.
- // These functions convert between absolute Addresses of Code objects and
- // the relative displacements stored in the code.
- static inline Address target_address_at(Address pc);
- static inline void set_target_address_at(Address pc, Address target);
-
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- static inline Address target_address_from_return_address(Address pc);
-
- // This sets the branch destination (which is in the instruction on x64).
- // This is for calls and branches within generated code.
- inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(instruction_payload, target);
- }
-
- // This sets the branch destination (which is a load instruction on x64).
- // This is for calls and branches to runtime code.
- inline static void set_external_target_at(Address instruction_payload,
- Address target) {
- *reinterpret_cast<Address*>(instruction_payload) = target;
- }
-
- inline Handle<Object> code_target_object_handle_at(Address pc);
- // Number of bytes taken up by the branch target in the code.
- static const int kSpecialTargetSize = 4; // Use 32-bit displacement.
- // Distance between the address of the code target in the call instruction
- // and the return address pushed on the stack.
- static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
- // Distance between the start of the JS return sequence and where the
- // 32-bit displacement of a near call would be, relative to the pushed
- // return address. TODO: Use return sequence length instead.
- // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
- static const int kPatchReturnSequenceAddressOffset = 13 - 4;
- // Distance between start of patched debug break slot and where the
- // 32-bit displacement of a near call would be, relative to the pushed
- // return address. TODO: Use return sequence length instead.
- // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
- static const int kPatchDebugBreakSlotAddressOffset = 13 - 4;
- // TODO(X64): Rename this, removing the "Real", after changing the above.
- static const int kRealPatchReturnSequenceAddressOffset = 2;
-
- // Some x64 JS code is padded with int3 to make it large
- // enough to hold an instruction when the debugger patches it.
- static const int kJumpInstructionLength = 13;
- static const int kCallInstructionLength = 13;
- static const int kJSReturnSequenceLength = 13;
- static const int kShortCallInstructionLength = 5;
- static const int kPatchDebugBreakSlotReturnOffset = 4;
-
- // The debug break slot must be able to contain a call instruction.
- static const int kDebugBreakSlotLength = kCallInstructionLength;
-
- // One byte opcode for test eax,0xXXXXXXXX.
- static const byte kTestEaxByte = 0xA9;
- // One byte opcode for test al, 0xXX.
- static const byte kTestAlByte = 0xA8;
- // One byte opcode for nop.
- static const byte kNopByte = 0x90;
-
- // One byte prefix for a short conditional jump.
- static const byte kJccShortPrefix = 0x70;
- static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
- static const byte kJcShortOpcode = kJccShortPrefix | carry;
- static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
- static const byte kJzShortOpcode = kJccShortPrefix | zero;
-
-
- // ---------------------------------------------------------------------------
- // Code generation
- //
- // Function names correspond one-to-one to x64 instruction mnemonics.
- // Unless specified otherwise, instructions operate on 64-bit operands.
- //
- // If we need versions of an assembly instruction that operate on different
- // width arguments, we add a single-letter suffix specifying the width.
- // This is done for the following instructions: mov, cmp, inc, dec,
- // add, sub, and test.
- // There are no versions of these instructions without the suffix.
- // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
- // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
- // - Instructions on 32-bit (doubleword) operands/registers use 'l'.
- // - Instructions on 64-bit (quadword) operands/registers use 'q'.
- //
- // Some mnemonics, such as "and", are the same as C++ keywords.
- // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
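- // For example, movl(rax, Operand(rbx, 0)) loads 32 bits from [rbx] and
- // zero-extends into rax, while movq(rax, Operand(rbx, 0)) loads all 64 bits.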
-
- // Insert the smallest number of nop instructions
- // possible to align the pc offset to a multiple
- // of m, where m must be a power of 2.
- void Align(int m);
- void Nop(int bytes = 1);
- // Aligns code to a boundary that is optimal for a jump target on this platform.
- void CodeTargetAlign();
-
- // Stack
- void pushfq();
- void popfq();
-
- void push(Immediate value);
- // Push a 32 bit integer, and guarantee that it is actually pushed as a
- // 32 bit value; the normal push will optimize the 8 bit case.
- void push_imm32(int32_t imm32);
- void push(Register src);
- void push(const Operand& src);
-
- void pop(Register dst);
- void pop(const Operand& dst);
-
- void enter(Immediate size);
- void leave();
-
- // Moves
- void movb(Register dst, const Operand& src);
- void movb(Register dst, Immediate imm);
- void movb(const Operand& dst, Register src);
-
- // Move the low 16 bits of a 64-bit register value to a 16-bit
- // memory location.
- void movw(const Operand& dst, Register src);
-
- void movl(Register dst, Register src);
- void movl(Register dst, const Operand& src);
- void movl(const Operand& dst, Register src);
- void movl(const Operand& dst, Immediate imm);
- // Load a 32-bit immediate value, zero-extended to 64 bits.
- void movl(Register dst, Immediate imm32);
-
- // Move 64 bit register value to 64-bit memory location.
- void movq(const Operand& dst, Register src);
- // Move 64 bit memory location to 64-bit register value.
- void movq(Register dst, const Operand& src);
- void movq(Register dst, Register src);
- // Sign extends immediate 32-bit value to 64 bits.
- void movq(Register dst, Immediate x);
- // Move the offset of the label location relative to the current
- // position (after the move) to the destination.
- void movl(const Operand& dst, Label* src);
-
- // Move sign extended immediate to memory location.
- void movq(const Operand& dst, Immediate value);
- // Instructions to load a 64-bit immediate into a register.
- // All 64-bit immediates must have a relocation mode.
- void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
- void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
- void movq(Register dst, const char* s, RelocInfo::Mode rmode);
- // Moves the address of the external reference into the register.
- void movq(Register dst, ExternalReference ext);
- void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
-
- void movsxbq(Register dst, const Operand& src);
- void movsxwq(Register dst, const Operand& src);
- void movsxlq(Register dst, Register src);
- void movsxlq(Register dst, const Operand& src);
- void movzxbq(Register dst, const Operand& src);
- void movzxbl(Register dst, const Operand& src);
- void movzxwq(Register dst, const Operand& src);
- void movzxwl(Register dst, const Operand& src);
- void movzxwl(Register dst, Register src);
-
- // Repeated moves.
-
- void repmovsb();
- void repmovsw();
- void repmovsl();
- void repmovsq();
-
- // Instruction to load from an immediate 64-bit pointer into RAX.
- void load_rax(void* ptr, RelocInfo::Mode rmode);
- void load_rax(ExternalReference ext);
-
- // Conditional moves.
- void cmovq(Condition cc, Register dst, Register src);
- void cmovq(Condition cc, Register dst, const Operand& src);
- void cmovl(Condition cc, Register dst, Register src);
- void cmovl(Condition cc, Register dst, const Operand& src);
-
- // Exchange two registers
- void xchg(Register dst, Register src);
-
- // Arithmetics
- void addl(Register dst, Register src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(Register dst, const Operand& src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(const Operand& dst, Register src) {
- arithmetic_op_32(0x01, src, dst);
- }
-
- void addq(Register dst, Register src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(Register dst, const Operand& src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(const Operand& dst, Register src) {
- arithmetic_op(0x01, src, dst);
- }
-
- void addq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void addq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void sbbl(Register dst, Register src) {
- arithmetic_op_32(0x1b, dst, src);
- }
-
- void sbbq(Register dst, Register src) {
- arithmetic_op(0x1b, dst, src);
- }
-
- void cmpb(Register dst, Immediate src) {
- immediate_arithmetic_op_8(0x7, dst, src);
- }
-
- void cmpb_al(Immediate src);
-
- void cmpb(Register dst, Register src) {
- arithmetic_op(0x3A, dst, src);
- }
-
- void cmpb(Register dst, const Operand& src) {
- arithmetic_op(0x3A, dst, src);
- }
-
- void cmpb(const Operand& dst, Register src) {
- arithmetic_op(0x38, src, dst);
- }
-
- void cmpb(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_8(0x7, dst, src);
- }
-
- void cmpw(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_16(0x7, dst, src);
- }
-
- void cmpw(Register dst, Immediate src) {
- immediate_arithmetic_op_16(0x7, dst, src);
- }
-
- void cmpw(Register dst, const Operand& src) {
- arithmetic_op_16(0x3B, dst, src);
- }
-
- void cmpw(Register dst, Register src) {
- arithmetic_op_16(0x3B, dst, src);
- }
-
- void cmpw(const Operand& dst, Register src) {
- arithmetic_op_16(0x39, src, dst);
- }
-
- void cmpl(Register dst, Register src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(Register dst, const Operand& src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(const Operand& dst, Register src) {
- arithmetic_op_32(0x39, src, dst);
- }
-
- void cmpl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpq(Register dst, Register src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(Register dst, const Operand& src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(const Operand& dst, Register src) {
- arithmetic_op(0x39, src, dst);
- }
-
- void cmpq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void cmpq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void and_(Register dst, Register src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(Register dst, const Operand& src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(const Operand& dst, Register src) {
- arithmetic_op(0x21, src, dst);
- }
-
- void and_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void and_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void andl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x4, dst, src);
- }
-
- void andl(Register dst, Register src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
- void andl(Register dst, const Operand& src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
- void andb(Register dst, Immediate src) {
- immediate_arithmetic_op_8(0x4, dst, src);
- }
-
- void decq(Register dst);
- void decq(const Operand& dst);
- void decl(Register dst);
- void decl(const Operand& dst);
- void decb(Register dst);
- void decb(const Operand& dst);
-
- // Sign-extends rax into rdx:rax.
- void cqo();
- // Sign-extends eax into edx:eax.
- void cdq();
-
- // Divide rdx:rax by src. Quotient in rax, remainder in rdx.
- void idivq(Register src);
- // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
- void idivl(Register src);
-
- // Signed multiply instructions.
- void imul(Register src); // rdx:rax = rax * src.
- void imul(Register dst, Register src); // dst = dst * src.
- void imul(Register dst, const Operand& src); // dst = dst * src.
- void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
- // Signed 32-bit multiply instructions.
- void imull(Register dst, Register src); // dst = dst * src.
- void imull(Register dst, const Operand& src); // dst = dst * src.
- void imull(Register dst, Register src, Immediate imm); // dst = src * imm.
-
- void incq(Register dst);
- void incq(const Operand& dst);
- void incl(Register dst);
- void incl(const Operand& dst);
-
- void lea(Register dst, const Operand& src);
- void leal(Register dst, const Operand& src);
-
- // Multiply rax by src, put the result in rdx:rax.
- void mul(Register src);
-
- void neg(Register dst);
- void neg(const Operand& dst);
- void negl(Register dst);
-
- void not_(Register dst);
- void not_(const Operand& dst);
- void notl(Register dst);
-
- void or_(Register dst, Register src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, Register src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(Register dst, const Operand& src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, const Operand& src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(const Operand& dst, Register src) {
- arithmetic_op(0x09, src, dst);
- }
-
- void or_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
- void or_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
-
- void rcl(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x2);
- }
-
- void rol(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x0);
- }
-
- void rcr(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x3);
- }
-
- void ror(Register dst, Immediate imm8) {
- shift(dst, imm8, 0x1);
- }
-
- void rorl(Register dst, Immediate imm8) {
- shift_32(dst, imm8, 0x1);
- }
-
- void rorl_cl(Register dst) {
- shift_32(dst, 0x1);
- }
-
- // Shifts dst:src left by cl bits, affecting only dst.
- void shld(Register dst, Register src);
-
- // Shifts src:dst right by cl bits, affecting only dst.
- void shrd(Register dst, Register src);
-
- // Shifts dst right, duplicating sign bit, by shift_amount bits.
- // Shifting by 1 is handled efficiently.
- void sar(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by shift_amount bits.
- // Shifting by 1 is handled efficiently.
- void sarl(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sar_cl(Register dst) {
- shift(dst, 0x7);
- }
-
- // Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sarl_cl(Register dst) {
- shift_32(dst, 0x7);
- }
-
- void shl(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x4);
- }
-
- void shl_cl(Register dst) {
- shift(dst, 0x4);
- }
-
- void shll_cl(Register dst) {
- shift_32(dst, 0x4);
- }
-
- void shll(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x4);
- }
-
- void shr(Register dst, Immediate shift_amount) {
- shift(dst, shift_amount, 0x5);
- }
-
- void shr_cl(Register dst) {
- shift(dst, 0x5);
- }
-
- void shrl_cl(Register dst) {
- shift_32(dst, 0x5);
- }
-
- void shrl(Register dst, Immediate shift_amount) {
- shift_32(dst, shift_amount, 0x5);
- }
-
- void store_rax(void* dst, RelocInfo::Mode mode);
- void store_rax(ExternalReference ref);
-
- void subq(Register dst, Register src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(Register dst, const Operand& src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(const Operand& dst, Register src) {
- arithmetic_op(0x29, src, dst);
- }
-
- void subq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subl(Register dst, Register src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(Register dst, const Operand& src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
- void subl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
- void subb(Register dst, Immediate src) {
- immediate_arithmetic_op_8(0x5, dst, src);
- }
-
- void testb(Register dst, Register src);
- void testb(Register reg, Immediate mask);
- void testb(const Operand& op, Immediate mask);
- void testb(const Operand& op, Register reg);
- void testl(Register dst, Register src);
- void testl(Register reg, Immediate mask);
- void testl(const Operand& op, Immediate mask);
- void testq(const Operand& op, Register reg);
- void testq(Register dst, Register src);
- void testq(Register dst, Immediate mask);
-
- void xor_(Register dst, Register src) {
- if (dst.code() == src.code()) {
- arithmetic_op_32(0x33, dst, src);
- } else {
- arithmetic_op(0x33, dst, src);
- }
- }
-
- void xorl(Register dst, Register src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, const Operand& src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xorl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xor_(Register dst, const Operand& src) {
- arithmetic_op(0x33, dst, src);
- }
-
- void xor_(const Operand& dst, Register src) {
- arithmetic_op(0x31, src, dst);
- }
-
- void xor_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
- void xor_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
- // Bit operations.
- void bt(const Operand& dst, Register src);
- void bts(const Operand& dst, Register src);
-
- // Miscellaneous
- void clc();
- void cld();
- void cpuid();
- void hlt();
- void int3();
- void nop();
- void rdtsc();
- void ret(int imm16);
- void setcc(Condition cc, Register reg);
-
- // Label operations & relative jumps (PPUM Appendix D)
- //
- // Takes a branch opcode (cc) and a label (L) and generates
- // either a backward branch or a forward branch and links it
- // to the label fixup chain. Usage:
- //
- // Label L; // unbound label
- // j(cc, &L); // forward branch to unbound label
- // bind(&L); // bind label to the current pc
- // j(cc, &L); // backward branch to bound label
- // bind(&L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void bind(Label* L); // binds an unbound label L to the current code position
-
- // Calls
- // Call near relative 32-bit displacement, relative to next instruction.
- void call(Label* L);
- void call(Handle<Code> target,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // Calls directly to the given address using a relative offset.
- // Should only ever be used in Code objects for calls within the
- // same Code object. Should not be used when generating new code (use labels),
- // but only when patching existing code.
- void call(Address target);
-
- // Call near absolute indirect, address in register
- void call(Register adr);
-
- // Call near indirect
- void call(const Operand& operand);
-
- // Jumps
- // Jump short or near relative.
- // Use a 32-bit signed displacement.
- // Unconditional jump to L
- void jmp(Label* L, Label::Distance distance = Label::kFar);
- void jmp(Handle<Code> target, RelocInfo::Mode rmode);
-
- // Jump near absolute indirect (r64)
- void jmp(Register adr);
-
- // Jump near absolute indirect (m64)
- void jmp(const Operand& src);
-
- // Conditional jumps
- void j(Condition cc,
- Label* L,
- Label::Distance distance = Label::kFar);
- void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
-
- // Floating-point operations
- void fld(int i);
-
- void fld1();
- void fldz();
- void fldpi();
- void fldln2();
-
- void fld_s(const Operand& adr);
- void fld_d(const Operand& adr);
-
- void fstp_s(const Operand& adr);
- void fstp_d(const Operand& adr);
- void fstp(int index);
-
- void fild_s(const Operand& adr);
- void fild_d(const Operand& adr);
-
- void fist_s(const Operand& adr);
-
- void fistp_s(const Operand& adr);
- void fistp_d(const Operand& adr);
-
- void fisttp_s(const Operand& adr);
- void fisttp_d(const Operand& adr);
-
- void fabs();
- void fchs();
-
- void fadd(int i);
- void fsub(int i);
- void fmul(int i);
- void fdiv(int i);
-
- void fisub_s(const Operand& adr);
-
- void faddp(int i = 1);
- void fsubp(int i = 1);
- void fsubrp(int i = 1);
- void fmulp(int i = 1);
- void fdivp(int i = 1);
- void fprem();
- void fprem1();
-
- void fxch(int i = 1);
- void fincstp();
- void ffree(int i = 0);
-
- void ftst();
- void fucomp(int i);
- void fucompp();
- void fucomi(int i);
- void fucomip();
-
- void fcompp();
- void fnstsw_ax();
- void fwait();
- void fnclex();
-
- void fsin();
- void fcos();
- void fptan();
- void fyl2x();
- void f2xm1();
- void fscale();
- void fninit();
-
- void frndint();
-
- void sahf();
-
- // SSE2 instructions
- void movd(XMMRegister dst, Register src);
- void movd(Register dst, XMMRegister src);
- void movq(XMMRegister dst, Register src);
- void movq(Register dst, XMMRegister src);
- void movq(XMMRegister dst, XMMRegister src);
- void extractps(Register dst, XMMRegister src, byte imm8);
-
- // Don't use this unless it's important to keep the
- // top half of the destination register unchanged.
- // Use movaps when moving double values and movq for integer
- // values in xmm registers.
- void movsd(XMMRegister dst, XMMRegister src);
-
- void movsd(const Operand& dst, XMMRegister src);
- void movsd(XMMRegister dst, const Operand& src);
-
- void movdqa(const Operand& dst, XMMRegister src);
- void movdqa(XMMRegister dst, const Operand& src);
-
- void movapd(XMMRegister dst, XMMRegister src);
- void movaps(XMMRegister dst, XMMRegister src);
-
- void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& dst, XMMRegister src);
-
- void cvttss2si(Register dst, const Operand& src);
- void cvttss2si(Register dst, XMMRegister src);
- void cvttsd2si(Register dst, const Operand& src);
- void cvttsd2si(Register dst, XMMRegister src);
- void cvttsd2siq(Register dst, XMMRegister src);
-
- void cvtlsi2sd(XMMRegister dst, const Operand& src);
- void cvtlsi2sd(XMMRegister dst, Register src);
- void cvtqsi2sd(XMMRegister dst, const Operand& src);
- void cvtqsi2sd(XMMRegister dst, Register src);
-
- void cvtlsi2ss(XMMRegister dst, Register src);
-
- void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtss2sd(XMMRegister dst, const Operand& src);
- void cvtsd2ss(XMMRegister dst, XMMRegister src);
-
- void cvtsd2si(Register dst, XMMRegister src);
- void cvtsd2siq(Register dst, XMMRegister src);
-
- void addsd(XMMRegister dst, XMMRegister src);
- void addsd(XMMRegister dst, const Operand& src);
- void subsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, const Operand& src);
- void divsd(XMMRegister dst, XMMRegister src);
-
- void andpd(XMMRegister dst, XMMRegister src);
- void orpd(XMMRegister dst, XMMRegister src);
- void xorpd(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, XMMRegister src);
- void sqrtsd(XMMRegister dst, XMMRegister src);
-
- void ucomisd(XMMRegister dst, XMMRegister src);
- void ucomisd(XMMRegister dst, const Operand& src);
-
- enum RoundingMode {
- kRoundToNearest = 0x0,
- kRoundDown = 0x1,
- kRoundUp = 0x2,
- kRoundToZero = 0x3
- };
-
- void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
-
- void movmskpd(Register dst, XMMRegister src);
- void movmskps(Register dst, XMMRegister src);
-
- // The first argument is the reg field; the second argument is the r/m field.
- void emit_sse_operand(XMMRegister dst, XMMRegister src);
- void emit_sse_operand(XMMRegister reg, const Operand& adr);
- void emit_sse_operand(XMMRegister dst, Register src);
- void emit_sse_operand(Register dst, XMMRegister src);
-
- // Debugging
- void Print();
-
- // Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* label) {
- return pc_offset() - label->pos();
- }
-
- // Mark address of the ExitJSFrame code.
- void RecordJSReturn();
-
- // Mark address of a debug break slot.
- void RecordDebugBreakSlot();
-
- // Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg, bool force = false);
-
- // Writes a single byte or 32-bit word of data in the code stream.
- // Used for inline tables, e.g., jump-tables.
- void db(uint8_t data);
- void dd(uint32_t data);
-
- PositionsRecorder* positions_recorder() { return &positions_recorder_; }
-
- // Check if there are fewer than kGap bytes available in the buffer.
- // If this is the case, we need to grow the buffer before emitting
- // an instruction or relocation information.
- inline bool buffer_overflow() const {
- return pc_ >= reloc_info_writer.pos() - kGap;
- }
-
- // Get the number of bytes available in the buffer.
- inline int available_space() const {
- return static_cast<int>(reloc_info_writer.pos() - pc_);
- }
-
- static bool IsNop(Address addr);
-
- // Avoid overflows for displacements etc.
- static const int kMaximalBufferSize = 512*MB;
-
- byte byte_at(int pos) { return buffer_[pos]; }
- void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
-
- private:
- byte* addr_at(int pos) { return buffer_ + pos; }
- uint32_t long_at(int pos) {
- return *reinterpret_cast<uint32_t*>(addr_at(pos));
- }
- void long_at_put(int pos, uint32_t x) {
- *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
- }
-
- // code emission
- void GrowBuffer();
-
- void emit(byte x) { *pc_++ = x; }
- inline void emitl(uint32_t x);
- inline void emitq(uint64_t x, RelocInfo::Mode rmode);
- inline void emitw(uint16_t x);
- inline void emit_code_target(Handle<Code> target,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
- void emit(Immediate x) { emitl(x.value_); }
-
- // Emits a REX prefix that encodes a 64-bit operand size and
- // the top bit of both register codes.
- // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
- // REX.W is set.
- inline void emit_rex_64(XMMRegister reg, Register rm_reg);
- inline void emit_rex_64(Register reg, XMMRegister rm_reg);
- inline void emit_rex_64(Register reg, Register rm_reg);
-
- // Emits a REX prefix that encodes a 64-bit operand size and
- // the top bit of the destination, index, and base register codes.
- // The high bit of reg is used for REX.R, the high bit of op's base
- // register is used for REX.B, and the high bit of op's index register
- // is used for REX.X. REX.W is set.
- inline void emit_rex_64(Register reg, const Operand& op);
- inline void emit_rex_64(XMMRegister reg, const Operand& op);
-
- // Emits a REX prefix that encodes a 64-bit operand size and
- // the top bit of the register code.
- // The high bit of register is used for REX.B.
- // REX.W is set and REX.R and REX.X are clear.
- inline void emit_rex_64(Register rm_reg);
-
- // Emits a REX prefix that encodes a 64-bit operand size and
- // the top bit of the index and base register codes.
- // The high bit of op's base register is used for REX.B, and the high
- // bit of op's index register is used for REX.X.
- // REX.W is set and REX.R clear.
- inline void emit_rex_64(const Operand& op);
-
- // Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
- void emit_rex_64() { emit(0x48); }
-
- // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
- // REX.W is clear.
- inline void emit_rex_32(Register reg, Register rm_reg);
-
- // The high bit of reg is used for REX.R, the high bit of op's base
- // register is used for REX.B, and the high bit of op's index register
- // is used for REX.X. REX.W is cleared.
- inline void emit_rex_32(Register reg, const Operand& op);
-
- // High bit of rm_reg goes to REX.B.
- // REX.W, REX.R and REX.X are clear.
- inline void emit_rex_32(Register rm_reg);
-
- // High bit of base goes to REX.B and high bit of index to REX.X.
- // REX.W and REX.R are clear.
- inline void emit_rex_32(const Operand& op);
-
- // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
- // REX.W is cleared. If no REX bits are set, no byte is emitted.
- inline void emit_optional_rex_32(Register reg, Register rm_reg);
-
- // The high bit of reg is used for REX.R, the high bit of op's base
- // register is used for REX.B, and the high bit of op's index register
- // is used for REX.X. REX.W is cleared. If no REX bits are set, nothing
- // is emitted.
- inline void emit_optional_rex_32(Register reg, const Operand& op);
-
- // As for emit_optional_rex_32(Register, Register), except that
- // the registers are XMM registers.
- inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
-
- // As for emit_optional_rex_32(Register, Register), except that
- // one of the registers is an XMM registers.
- inline void emit_optional_rex_32(XMMRegister reg, Register base);
-
- // As for emit_optional_rex_32(Register, Register), except that
- // one of the registers is an XMM registers.
- inline void emit_optional_rex_32(Register reg, XMMRegister base);
-
- // As for emit_optional_rex_32(Register, const Operand&), except that
- // the register is an XMM register.
- inline void emit_optional_rex_32(XMMRegister reg, const Operand& op);
-
- // Optionally do as emit_rex_32(Register) if the register number has
- // the high bit set.
- inline void emit_optional_rex_32(Register rm_reg);
-
- // Optionally do as emit_rex_32(const Operand&) if the operand register
- // numbers have a high bit set.
- inline void emit_optional_rex_32(const Operand& op);
-
-
- // Emit the ModR/M byte, and optionally the SIB byte and
- // 1- or 4-byte offset for a memory operand. Also encodes
- // the second operand of the operation, a register or operation
- // subcode, into the reg field of the ModR/M byte.
- void emit_operand(Register reg, const Operand& adr) {
- emit_operand(reg.low_bits(), adr);
- }
-
- // Emit the ModR/M byte, and optionally the SIB byte and
- // 1- or 4-byte offset for a memory operand. Also used to encode
- // a three-bit opcode extension into the ModR/M byte.
- void emit_operand(int rm, const Operand& adr);
-
- // Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
- void emit_modrm(Register reg, Register rm_reg) {
- emit(0xC0 | reg.low_bits() << 3 | rm_reg.low_bits());
- }
-
- // Emit a ModR/M byte with an operation subcode in the reg field and
- // a register in the rm_reg field.
- void emit_modrm(int code, Register rm_reg) {
- ASSERT(is_uint3(code));
- emit(0xC0 | code << 3 | rm_reg.low_bits());
- }
-
- // Emit the code-object-relative offset of the label's position
- inline void emit_code_relative_offset(Label* label);
-
- // Emit machine code for one of the operations ADD, ADC, SUB, SBB,
- // AND, OR, XOR, or CMP. The encodings of these operations are all
- // similar, differing just in the opcode or in the reg field of the
- // ModR/M byte.
- void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_16(byte opcode, Register reg, const Operand& rm_reg);
- void arithmetic_op_32(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
- void arithmetic_op(byte opcode, Register reg, Register rm_reg);
- void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
- void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
- void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
- // Operate on a byte in memory or register.
- void immediate_arithmetic_op_8(byte subcode,
- Register dst,
- Immediate src);
- void immediate_arithmetic_op_8(byte subcode,
- const Operand& dst,
- Immediate src);
- // Operate on a word in memory or register.
- void immediate_arithmetic_op_16(byte subcode,
- Register dst,
- Immediate src);
- void immediate_arithmetic_op_16(byte subcode,
- const Operand& dst,
- Immediate src);
- // Operate on a 32-bit word in memory or register.
- void immediate_arithmetic_op_32(byte subcode,
- Register dst,
- Immediate src);
- void immediate_arithmetic_op_32(byte subcode,
- const Operand& dst,
- Immediate src);
-
- // Emit machine code for a shift operation.
- void shift(Register dst, Immediate shift_amount, int subcode);
- void shift_32(Register dst, Immediate shift_amount, int subcode);
- // Shift dst by cl % 64 bits.
- void shift(Register dst, int subcode);
- void shift_32(Register dst, int subcode);
-
- void emit_farith(int b1, int b2, int i);
-
- // labels
- // void print(Label* L);
- void bind_to(Label* L, int pos);
-
- // record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
- friend class CodePatcher;
- friend class EnsureSpace;
- friend class RegExpMacroAssemblerX64;
-
- // code generation
- RelocInfoWriter reloc_info_writer;
-
- List< Handle<Code> > code_targets_;
-
- PositionsRecorder positions_recorder_;
- friend class PositionsRecorder;
-};
-
-
-// Helper class that ensures that there is enough space for generating
-// instructions and relocation information. The constructor makes
-// sure that there is enough space and (in debug mode) the destructor
-// checks that we did not generate too much.
-class EnsureSpace BASE_EMBEDDED {
- public:
- explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
-#ifdef DEBUG
- space_before_ = assembler_->available_space();
-#endif
- }
-
-#ifdef DEBUG
- ~EnsureSpace() {
- int bytes_generated = space_before_ - assembler_->available_space();
- ASSERT(bytes_generated < assembler_->kGap);
- }
-#endif
-
- private:
- Assembler* assembler_;
-#ifdef DEBUG
- int space_before_;
-#endif
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_ASSEMBLER_X64_H_
diff --git a/src/3rdparty/v8/src/x64/builtins-x64.cc b/src/3rdparty/v8/src/x64/builtins-x64.cc
deleted file mode 100644
index 144962b..0000000
--- a/src/3rdparty/v8/src/x64/builtins-x64.cc
+++ /dev/null
@@ -1,1884 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
- // ----------- S t a t e -------------
- // -- rax : number of arguments excluding receiver
- // -- rdi : called function (only guaranteed when
- // extra_args requires it)
- // -- rsi : context
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -- ...
- // -- rsp[8 * argc] : first argument (argc == rax)
- // -- rsp[8 * (argc + 1)] : receiver
- // -----------------------------------
-
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ pop(kScratchRegister); // Save return address.
- __ push(rdi);
- __ push(kScratchRegister); // Restore return address.
- } else {
- ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- }
-
- // JumpToExternalReference expects rax to contain the number of arguments
- // including the receiver and the extra arguments.
- __ addq(rax, Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
-}
-
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ movq(kScratchRegister,
- FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(kScratchRegister,
- FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
- __ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
- __ jmp(kScratchRegister);
-}
-
-
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
- GenerateTailCallToSharedCode(masm);
-}
-
-
-void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kParallelRecompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
- GenerateTailCallToSharedCode(masm);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
- // ----------- S t a t e -------------
- // -- rax: number of arguments
- // -- rdi: constructor function
- // -----------------------------------
-
- // Should never count constructions for api objects.
- ASSERT(!is_api_function || !count_constructions);
-
- // Enter a construct frame.
- {
- FrameScope scope(masm, StackFrame::CONSTRUCT);
-
- // Store a smi-tagged arguments count on the stack.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
-
- // Push the function to invoke on the stack.
- __ push(rdi);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- Label undo_allocation;
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ movq(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
- __ j(not_equal, &rt_call);
-#endif
-
- // Verified that the constructor is a JSFunction.
- // Load the initial map and verify that it is in fact a map.
- // rdi: constructor
- __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will catch both a NULL and a Smi.
- ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rax, &rt_call);
- // rdi: constructor
- // rax: initial map (if proven valid below)
- __ CmpObjectType(rax, MAP_TYPE, rbx);
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc), in which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // rdi: constructor
- // rax: initial map
- __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (count_constructions) {
- Label allocate;
- // Decrease generous allocation count.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ decb(FieldOperand(rcx,
- SharedFunctionInfo::kConstructionCountOffset));
- __ j(not_zero, &allocate);
-
- __ push(rax);
- __ push(rdi);
-
- __ push(rdi); // constructor
- // The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(rdi);
- __ pop(rax);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shl(rdi, Immediate(kPointerSizeLog2));
- // rdi: size of new object
- __ AllocateInNewSpace(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // rax: initial map
- // rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
- __ movq(Operand(rbx, JSObject::kMapOffset), rax);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
- // Set extra fields in the newly allocated object.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- if (count_constructions) {
- __ movzxbq(rsi,
- FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ lea(rsi,
- Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
- // rsi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmpq(rsi, rdi);
- __ Assert(less_equal,
- "Unexpected number of pre-allocated property fields.");
- }
- __ InitializeFieldsWithFiller(rcx, rsi, rdx);
- __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
- }
- __ InitializeFieldsWithFiller(rcx, rdi, rdx);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on. Any
- // failures need to undo the allocation, so that the heap is in a
- // consistent state and verifiable.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
-
- // Check if a non-empty properties array is needed.
- // Allocate and initialize a FixedArray if it is.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- // Calculate the total number of properties described by the map.
- __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbq(rcx,
- FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addq(rdx, rcx);
- // Calculate unused properties past the end of the in-object properties.
- __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subq(rdx, rcx);
- // Done if no extra properties are to be allocated.
- __ j(zero, &allocated);
- __ Assert(positive, "Property allocation count failed.");
-
- // Scale the number of elements by pointer size and add the header for
- // FixedArrays to the start of the next object calculation from above.
- // rbx: JSObject
- // rdi: start of next object (will be start of FixedArray)
- // rdx: number of elements in properties array
- __ AllocateInNewSpace(FixedArray::kHeaderSize,
- times_pointer_size,
- rdx,
- rdi,
- rax,
- no_reg,
- &undo_allocation,
- RESULT_CONTAINS_TOP);
-
- // Initialize the FixedArray.
- // rbx: JSObject
- // rdi: FixedArray
- // rdx: number of elements
- // rax: start of next object
- __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
- __ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
-
- // Initialize the fields to undefined.
- // rbx: JSObject
- // rdi: FixedArray
- // rax: start of next object
- // rdx: number of elements
- { Label loop, entry;
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(below, &loop);
- }
-
- // Store the initialized FixedArray into the properties field of
- // the JSObject
- // rbx: JSObject
- // rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
- __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
-
-
- // Continue with JSObject being successfully allocated
- // rbx: JSObject
- __ jmp(&allocated);
-
- // Undo the setting of the new top so that the heap is verifiable. For
- // example, the map's unused properties potentially do not match the
- // allocated object's unused properties.
- // rbx: JSObject (previous new top)
- __ bind(&undo_allocation);
- __ UndoAllocationInNewSpace(rbx);
- }
-
- // Allocate the new receiver object using the runtime call.
- // rdi: function (constructor)
- __ bind(&rt_call);
- // Must restore rdi (constructor) before calling runtime.
- __ movq(rdi, Operand(rsp, 0));
- __ push(rdi);
- __ CallRuntime(Runtime::kNewObject, 1);
- __ movq(rbx, rax); // store result in rbx
-
- // New object allocated.
- // rbx: newly allocated object
- __ bind(&allocated);
- // Retrieve the function from the stack.
- __ pop(rdi);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ movq(rax, Operand(rsp, 0));
- __ SmiToInteger32(rax, rax);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(rbx);
- __ push(rbx);
-
- // Set up pointer to last argument.
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(rbx, rcx, times_pointer_size, 0));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
-
- // Call the function.
- if (is_api_function) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- ParameterCount expected(0);
- __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
- CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
- } else {
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
-
- // Store offset of return address for deoptimizer.
- if (!is_api_function && !count_constructions) {
- masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context from the frame.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(rax, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &exit);
-
- // Symbols are "objects".
- __ CmpInstanceType(rcx, SYMBOL_TYPE);
- __ j(equal, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ movq(rax, Operand(rsp, 0));
-
- // Restore the arguments count and leave the construct frame.
- __ bind(&exit);
- __ movq(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
-
- // Leave construct frame.
- }
-
- // Remove caller arguments from the stack and return.
- __ pop(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ ret(0);
-}
-
-
-void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
-}
-
-
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
-}
-
-
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
-}
-
-
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- // Expects five C++ function parameters.
- // - Address entry (ignored)
- // - JSFunction* function
- // - Object* receiver
- // - int argc
- // - Object*** argv
- // (see Handle::Invoke in execution.cc).
-
- // Open a C++ scope for the FrameScope.
- {
- // Platform specific argument handling. After this, the stack contains
- // an internal frame and the pushed function and receiver, and
- // registers rax and rbx hold the argument count and argument array,
- // while rdi holds the function pointer and rsi the context.
-
-#ifdef _WIN64
- // MSVC parameters in:
- // rcx : entry (ignored)
- // rdx : function
- // r8 : receiver
- // r9 : argc
- // [rsp+0x20] : argv
-
- // Clear the context before we push it when entering the internal frame.
- __ Set(rsi, 0);
- // Enter an internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load the function context into rsi.
- __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
-
- // Push the function and the receiver onto the stack.
- __ push(rdx);
- __ push(r8);
-
- // Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, r9);
- // Load the previous frame pointer to access the C arguments on the stack.
- __ movq(kScratchRegister, Operand(rbp, 0));
- __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
- // Load the function pointer into rdi.
- __ movq(rdi, rdx);
-#else // _WIN64
- // GCC parameters in:
- // rdi : entry (ignored)
- // rsi : function
- // rdx : receiver
- // rcx : argc
- // r8 : argv
-
- __ movq(rdi, rsi);
- // rdi : function
-
- // Clear the context before we push it when entering the internal frame.
- __ Set(rsi, 0);
- // Enter an internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the function and receiver and setup the context.
- __ push(rdi);
- __ push(rdx);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Load the number of arguments and setup pointer to the arguments.
- __ movq(rax, rcx);
- __ movq(rbx, r8);
-#endif // _WIN64
-
- // Current stack contents:
- // [rsp + 2 * kPointerSize ... ]: Internal frame
- // [rsp + kPointerSize] : function
- // [rsp] : receiver
- // Current register contents:
- // rax : argc
- // rbx : argv
- // rsi : context
- // rdi : function
-
- // Copy arguments to the stack in a loop.
- // Register rbx points to array of pointers to handle locations.
- // Push the values of these handles.
- Label loop, entry;
- __ Set(rcx, 0); // Set loop variable to 0.
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
- __ bind(&entry);
- __ cmpq(rcx, rax);
- __ j(not_equal, &loop);
-
- // Invoke the code.
- if (is_construct) {
- // No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_sentinel);
- // Expects rdi to hold function pointer.
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(rax);
- // Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- }
- // Exit the internal frame. Notice that this also removes the empty
- // context and the function left on the stack by the code
- // invocation.
- }
-
- // TODO(X64): Is argument correct? Is there a receiver to remove?
- __ ret(1 * kPointerSize); // Remove receiver.
-}
-
-
-void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
-}
-
-
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
-}
-
-
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyCompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore receiver.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
-}
-
-
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push a copy of the function onto the stack.
- __ push(rdi);
- // Push call kind information.
- __ push(rcx);
-
- __ push(rdi); // Function is also the parameter to the runtime call.
- __ CallRuntime(Runtime::kLazyRecompile, 1);
-
- // Restore call kind information.
- __ pop(rcx);
- // Restore function.
- __ pop(rdi);
-
- // Tear down internal frame.
- }
-
- // Do a tail-call of the compiled function.
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
-}
-
-
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- // Re-execute the code that was patched back to the young age when
- // the stub returns.
- __ subq(Operand(rsp, 0), Immediate(5));
- __ Pushad();
-#ifdef _WIN64
- __ movq(rcx, Operand(rsp, kNumSafepointRegisters * kPointerSize));
-#else
- __ movq(rdi, Operand(rsp, kNumSafepointRegisters * kPointerSize));
-#endif
- { // NOLINT
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
- }
- __ Popad();
- __ ret(0);
-}
-
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-
-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Preserve registers across the notification; this is important for compiled
- // stubs that tail call the runtime on deopts passing their parameters in
- // registers.
- __ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0);
- __ Popad();
- // Tear down internal frame.
- }
-
- __ pop(MemOperand(rsp, 0)); // Ignore state offset
- __ ret(0); // Return to IC Miss stub, continuation still on stack.
-}
-
-
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
- Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
-
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
- // Tear down internal frame.
- }
-
- // Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize));
-
- // Switch on the state.
- Label not_no_registers, not_tos_rax;
- __ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS));
- __ j(not_equal, &not_no_registers, Label::kNear);
- __ ret(1 * kPointerSize); // Remove state.
-
- __ bind(&not_no_registers);
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
- __ j(not_equal, &not_tos_rax, Label::kNear);
- __ ret(2 * kPointerSize); // Remove state, rax.
-
- __ bind(&not_tos_rax);
- __ Abort("no cases left");
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ Pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ Popad();
- __ ret(0);
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- // Stack Layout:
- // rsp[0]: Return address
- // rsp[1]: Argument n
- // rsp[2]: Argument n-1
- // ...
- // rsp[n]: Argument 1
- // rsp[n+1]: Receiver (function to call)
- //
- // rax contains the number of arguments, n, not counting the receiver.
- //
- // 1. Make sure we have at least one argument.
- { Label done;
- __ testq(rax, rax);
- __ j(not_zero, &done);
- __ pop(rbx);
- __ Push(masm->isolate()->factory()->undefined_value());
- __ push(rbx);
- __ incq(rax);
- __ bind(&done);
- }
-
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- Label slow, non_function;
- // The function to call is at position n+1 on the stack.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ JumpIfSmi(rdi, &non_function);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- // 3a. Patch the first argument if necessary when calling a function.
- Label shift_arguments;
- __ Set(rdx, 0); // indicate regular JS_FUNCTION
- { Label convert_to_object, use_global_receiver, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rbx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &shift_arguments);
-
- // Do not transform the receiver for natives.
- // SharedFunctionInfo is already loaded into rbx.
- __ testb(FieldOperand(rbx, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_zero, &shift_arguments);
-
- // Compute the receiver in non-strict mode.
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
- __ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
-
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
-
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &shift_arguments);
-
- __ bind(&convert_to_object);
- {
- // Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
-
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
- __ Set(rdx, 0); // indicate regular JS_FUNCTION
-
- __ pop(rax);
- __ SmiToInteger32(rax, rax);
- }
-
- // Restore the function to rdi.
- __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ jmp(&patch_receiver, Label::kNear);
-
- // Use the global receiver object from the called function as the
- // receiver.
- __ bind(&use_global_receiver);
- const int kGlobalIndex =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-
- __ bind(&patch_receiver);
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
-
- __ jmp(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ Set(rdx, 1); // indicate function proxy
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(equal, &shift_arguments);
- __ bind(&non_function);
- __ Set(rdx, 2); // indicate non-function
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
-
- // 4. Shift arguments and return address one slot down on the stack
- // (overwriting the original receiver). Adjust argument count to make
- // the original first argument the new receiver.
- __ bind(&shift_arguments);
- { Label loop;
- __ movq(rcx, rax);
- __ bind(&loop);
- __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
- __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
- __ decq(rcx);
- __ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(rbx); // Discard copy of return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
- }
-
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- { Label function, non_proxy;
- __ testq(rdx, rdx);
- __ j(zero, &function);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ cmpq(rdx, Immediate(1));
- __ j(not_equal, &non_proxy);
-
- __ pop(rdx); // return address
- __ push(rdi); // re-add proxy object as additional argument
- __ push(rdx);
- __ incq(rax);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movsxlq(rbx,
- FieldOperand(rdx,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ cmpq(rax, rbx);
- __ j(not_equal,
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- ParameterCount expected(0);
- __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- // Stack at entry:
- // rsp: return address
- // rsp+8: arguments
- // rsp+16: receiver ("this")
- // rsp+24: function
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // rbp: Old base pointer
- // rbp[1]: return address
- // rbp[2]: function arguments
- // rbp[3]: receiver
- // rbp[4]: function
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
-
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movq(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
- // Make rdx the space we need for the array when it is unrolled onto the
- // stack.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
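The stack check reduces to a signed comparison between the space left above the real stack limit and the space the unrolled arguments will occupy. A self-contained restatement, with names of my own choosing rather than V8's:

#include <cstdint>

bool ArgumentsFitOnStack(intptr_t stack_pointer,
                         intptr_t real_stack_limit,
                         int64_t argument_count) {
  // space_left may already be negative if the stack has overflowed; the
  // signed comparison below then fails, which is exactly what we want.
  int64_t space_left = stack_pointer - real_stack_limit;
  int64_t space_needed = argument_count * static_cast<int64_t>(sizeof(void*));
  return space_left > space_needed;  // the j(greater, &okay) in the stub
}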
-
- // Push current index and limit.
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
-
- // Get the receiver.
- __ movq(rbx, Operand(rbp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_receiver;
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &push_receiver);
-
- // Do not transform the receiver for natives.
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &push_receiver);
-
- // Compute the receiver in non-strict mode.
- __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_receiver);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_receiver);
-
- // If the given receiver is already a JavaScript object then there is no
- // reason to convert it.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &push_receiver);
-
- // Convert the receiver to an object.
- __ bind(&call_to_object);
- __ push(rbx);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ movq(rbx, rax);
- __ jmp(&push_receiver, Label::kNear);
-
- // Use the current global receiver object as the receiver.
- __ bind(&use_global_receiver);
- const int kGlobalOffset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
- __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(rbx);
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic =
- masm->isolate()->builtins()->KeyedLoadIC_Initialize();
- __ Call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(rax);
-
- // Update the index on the stack and in register rax.
- __ movq(rax, Operand(rbp, kIndexOffset));
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- __ movq(Operand(rbp, kIndexOffset), rax);
-
- __ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
- __ j(not_equal, &loop);
-
- // Invoke the function.
- Label call_proxy;
- ParameterCount actual(rax);
- __ SmiToInteger32(rax, rax);
- __ movq(rdi, Operand(rbp, kFunctionOffset));
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &call_proxy);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-
- frame_scope.GenerateLeaveFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
-
- // Invoke the function proxy.
- __ bind(&call_proxy);
- __ push(rdi); // add function proxy as last argument
- __ incq(rax);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- // Leave internal frame.
- }
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
-}
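Ignoring the register shuffling, the receiver handling in Generate_FunctionApply follows a small decision table for the JS-function path (proxies push the receiver unchanged): strict-mode and native callees keep the receiver as-is, null and undefined are replaced by the global receiver, objects pass through, and any other primitive is converted via TO_OBJECT. A hedged C++ summary; the enum and parameter names are mine:

enum class ReceiverAction { kUseAsIs, kUseGlobalReceiver, kConvertToObject };

ReceiverAction SelectApplyReceiver(bool callee_is_strict_or_native,
                                   bool receiver_is_null_or_undefined,
                                   bool receiver_is_spec_object) {
  // Strict-mode and native callees take the receiver untouched.
  if (callee_is_strict_or_native) return ReceiverAction::kUseAsIs;
  // null and undefined become the global receiver object.
  if (receiver_is_null_or_undefined) return ReceiverAction::kUseGlobalReceiver;
  // Anything that is already a spec object needs no conversion.
  if (receiver_is_spec_object) return ReceiverAction::kUseAsIs;
  // Remaining primitives go through the TO_OBJECT builtin.
  return ReceiverAction::kConvertToObject;
}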
-
-
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero, an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
- Register array_function,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- STATIC_ASSERT(initial_capacity >= 0);
-
- __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
-
- // Allocate the JSArray object together with space for a fixed array with the
- // requested elements.
- int size = JSArray::kSize;
- if (initial_capacity > 0) {
- size += FixedArray::SizeFor(initial_capacity);
- }
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // scratch1: initial map
- // scratch2: start of next object
- Factory* factory = masm->isolate()->factory();
- __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
- __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
- factory->empty_fixed_array());
- // Field JSArray::kElementsOffset is initialized later.
- __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
-
- // If no storage is requested for the elements array just set the empty
- // fixed array.
- if (initial_capacity == 0) {
- __ Move(FieldOperand(result, JSArray::kElementsOffset),
- factory->empty_fixed_array());
- return;
- }
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // scratch2: start of next object
- __ lea(scratch1, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
- // Initialize the FixedArray and fill it with holes. FixedArray length is
- // stored as a smi.
- // result: JSObject
- // scratch1: elements array
- // scratch2: start of next object
- __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
- factory->fixed_array_map());
- __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
- Smi::FromInt(initial_capacity));
-
- // Fill the FixedArray with the hole value. Inline the code if short.
- // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
- static const int kLoopUnfoldLimit = 4;
- __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
- if (initial_capacity <= kLoopUnfoldLimit) {
- // Use a scratch register here to have only one reloc info when unfolding
- // the loop.
- for (int i = 0; i < initial_capacity; i++) {
- __ movq(FieldOperand(scratch1,
- FixedArray::kHeaderSize + i * kPointerSize),
- scratch3);
- }
- } else {
- Label loop, entry;
- __ movq(scratch2, Immediate(initial_capacity));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(FieldOperand(scratch1,
- scratch2,
- times_pointer_size,
- FixedArray::kHeaderSize),
- scratch3);
- __ bind(&entry);
- __ decq(scratch2);
- __ j(not_sign, &loop);
- }
-}
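The fill step at the end of AllocateEmptyJSArray either emits a few straight-line stores (capacity at most kLoopUnfoldLimit) or a counting loop. The same decision expressed in ordinary C++, purely as an illustration and not as V8 code:

#include <cstdint>

constexpr int kLoopUnfoldLimit = 4;  // same threshold as the generator above

void FillWithHoles(uint64_t* elements, int capacity, uint64_t the_hole) {
  if (capacity <= kLoopUnfoldLimit) {
    // Small capacity: the stub emits one store instruction per element
    // instead of a loop.
    for (int i = 0; i < capacity; ++i) elements[i] = the_hole;
  } else {
    // Larger capacity: a backwards-counting loop, matching the
    // decq/j(not_sign) sequence the stub generates.
    for (int i = capacity - 1; i >= 0; --i) elements[i] = the_hole;
  }
}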
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end (see
-// below for when that is not the case). If the parameter fill_with_hole is
-// true, the allocated elements backing store is filled with the hole values;
-// otherwise it is left uninitialized. When the backing store is filled, the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
- Register array_function, // Array function.
- Register array_size, // As a smi, cannot be 0.
- Register result,
- Register elements_array,
- Register elements_array_end,
- Register scratch,
- bool fill_with_hole,
- Label* gc_required) {
- __ LoadInitialArrayMap(array_function, scratch,
- elements_array, fill_with_hole);
-
- if (FLAG_debug_code) { // Assert that array size is not zero.
- __ testq(array_size, array_size);
- __ Assert(not_zero, "array size is unexpectedly 0");
- }
-
- // Allocate the JSArray object together with space for a FixedArray with the
- // requested elements.
- SmiIndex index =
- masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
- __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- index.scale,
- index.reg,
- result,
- elements_array_end,
- scratch,
- gc_required,
- TAG_OBJECT);
-
- // Allocated the JSArray. Now initialize the fields except for the elements
- // array.
- // result: JSObject
- // elements_array: initial map
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- Factory* factory = masm->isolate()->factory();
- __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
- __ Move(elements_array, factory->empty_fixed_array());
- __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
- // Field JSArray::kElementsOffset is initialized later.
- __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
- // Calculate the location of the elements array and set elements array member
- // of the JSArray.
- // result: JSObject
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ lea(elements_array, Operand(result, JSArray::kSize));
- __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
- // Initialize the fixed array. FixedArray length is stored as a smi.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- // array_size: size of array (smi)
- __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
- factory->fixed_array_map());
- // For non-empty JSArrays the length of the FixedArray and the JSArray is the
- // same.
- __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
- // Fill the allocated FixedArray with the hole value if requested.
- // result: JSObject
- // elements_array: elements array
- // elements_array_end: start of next object
- if (fill_with_hole) {
- Label loop, entry;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ lea(elements_array, Operand(elements_array,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(Operand(elements_array, 0), scratch);
- __ addq(elements_array, Immediate(kPointerSize));
- __ bind(&entry);
- __ cmpq(elements_array, elements_array_end);
- __ j(below, &loop);
- }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-// rdi: constructor (built-in Array function)
-// rax: argc
-// rsp[0]: return address
-// rsp[8]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in rdi needs to be preserved for
-// entering the generic code. In both cases argc in rax needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// a construct call and a normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
- Label* call_generic_code) {
- Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
- has_non_smi_element, finish, cant_transition_map, not_double;
-
- // Check for array construction with zero arguments.
- __ testq(rax, rax);
- __ j(not_zero, &argc_one_or_more);
-
- __ bind(&empty_array);
- // Handle construction of an empty array.
- AllocateEmptyJSArray(masm,
- rdi,
- rbx,
- rcx,
- rdx,
- r8,
- call_generic_code);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->array_function_native(), 1);
- __ movq(rax, rbx);
- __ ret(kPointerSize);
-
- // Check for one argument. Bail out if the argument is not a smi or if it
- // is negative.
- __ bind(&argc_one_or_more);
- __ cmpq(rax, Immediate(1));
- __ j(not_equal, &argc_two_or_more);
- __ movq(rdx, Operand(rsp, kPointerSize)); // Get the argument from the stack.
-
- __ SmiTest(rdx);
- __ j(not_zero, &not_empty_array);
- __ pop(r8); // Adjust stack.
- __ Drop(1);
- __ push(r8);
- __ movq(rax, Immediate(0)); // Treat this as a call with argc of zero.
- __ jmp(&empty_array);
-
- __ bind(&not_empty_array);
- __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
-
- // Handle construction of an empty array of a certain size. Bail out if size
- // is too large to actually allocate an elements array.
- __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
- __ j(greater_equal, call_generic_code);
-
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // rsp[0]: return address
- // rsp[8]: argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- true,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1);
- __ movq(rax, rbx);
- __ ret(2 * kPointerSize);
-
- // Handle construction of an array from a list of arguments.
- __ bind(&argc_two_or_more);
- __ movq(rdx, rax);
- __ Integer32ToSmi(rdx, rdx); // Convert argc to a smi.
- // rax: argc
- // rdx: array_size (smi)
- // rdi: constructor
- // rsp[0] : return address
- // rsp[8] : last argument
- AllocateJSArray(masm,
- rdi,
- rdx,
- rbx,
- rcx,
- r8,
- r9,
- false,
- call_generic_code);
- __ IncrementCounter(counters->array_function_native(), 1);
-
- // rax: argc
- // rbx: JSArray
- // rcx: elements_array
- // r8: elements_array_end (untagged)
- // rsp[0]: return address
- // rsp[8]: last argument
-
- // Location of the last argument
- __ lea(r9, Operand(rsp, kPointerSize));
-
- // Location of the first array element (the parameter fill_with_hole to
- // AllocateJSArray is false, so the FixedArray is returned in rcx).
- __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
-
- // rax: argc
- // rbx: JSArray
- // rdx: location of the first array element
- // r9: location of the last argument
- // rsp[0]: return address
- // rsp[8]: last argument
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
- if (FLAG_smi_only_arrays) {
- __ JumpIfNotSmi(r8, &has_non_smi_element);
- }
- __ movq(Operand(rdx, 0), r8);
- __ addq(rdx, Immediate(kPointerSize));
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop);
-
- // Remove caller arguments from the stack and return.
- // rax: argc
- // rbx: JSArray
- // rsp[0]: return address
- // rsp[8]: last argument
- __ bind(&finish);
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ push(rcx);
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&has_non_smi_element);
- // Double values are handled by the runtime.
- __ CheckMap(r8,
- masm->isolate()->factory()->heap_number_map(),
- &not_double,
- DONT_DO_SMI_CHECK);
- __ bind(&cant_transition_map);
- __ UndoAllocationInNewSpace(rbx);
- __ jmp(call_generic_code);
-
- __ bind(&not_double);
- // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
- // rbx: JSArray
- __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- r11,
- kScratchRegister,
- &cant_transition_map);
-
- __ movq(FieldOperand(rbx, HeapObject::kMapOffset), r11);
- __ RecordWriteField(rbx, HeapObject::kMapOffset, r11, r8,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Finish the array initialization loop.
- Label loop2;
- __ bind(&loop2);
- __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
- __ movq(Operand(rdx, 0), r8);
- __ addq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
- __ j(greater_equal, &loop2);
- __ jmp(&finish);
-}
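The tail of ArrayNativeCode copies the constructor arguments into the fresh backing store; the first non-smi value either bails out to the generic code (heap numbers, which want a double backing store) or forces a one-way FAST_SMI_ELEMENTS to FAST_ELEMENTS transition before the copy continues. A simplified, hypothetical C++ restatement of that flow, not the real implementation:

#include <cstdint>
#include <vector>

enum class ElementsKind { kFastSmiElements, kFastElements, kBailout };

ElementsKind CopyConstructorArguments(const std::vector<int64_t>& args,
                                      std::vector<int64_t>* elements,
                                      bool (*is_smi)(int64_t),
                                      bool (*is_heap_number)(int64_t)) {
  ElementsKind kind = ElementsKind::kFastSmiElements;
  for (int64_t value : args) {
    if (kind == ElementsKind::kFastSmiElements && !is_smi(value)) {
      // Doubles are left to the runtime; any other heap object transitions
      // the array to the generic tagged representation.
      if (is_heap_number(value)) return ElementsKind::kBailout;
      kind = ElementsKind::kFastElements;
    }
    elements->push_back(value);
  }
  return kind;
}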
-
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the InternalArray function.
- __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin InternalArray functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // A smi check will detect both a NULL pointer and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for InternalArray function");
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for InternalArray function");
- }
-
- // Run the native code for the InternalArray function called as a normal
- // function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->InternalArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- Label generic_array_code;
-
- // Get the Array function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rdi);
-
- if (FLAG_debug_code) {
- // Initial map for the builtin Array functions should be maps.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // A smi check will detect both a NULL pointer and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
- __ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
- }
-
- // Run the native code for the Array function called as a normal function.
- ArrayNativeCode(masm, &generic_array_code);
-
- // Jump to the generic array code in case the specialized code cannot handle
- // the construction.
- __ bind(&generic_array_code);
- Handle<Code> array_code =
- masm->isolate()->builtins()->ArrayCodeGeneric();
- __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- if (FLAG_debug_code) {
- // The array construct code is only set for the builtin and internal
- // Array functions which always have a map.
-
- // Initial map for the builtin Array function should be a map.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // A smi check will detect both a NULL pointer and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
- __ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rcx, MAP_TYPE, rcx);
- __ Check(equal, "Unexpected initial map for Array function");
-
- if (FLAG_optimize_constructed_arrays) {
- // We should either have undefined in rbx or a valid JSGlobalPropertyCell.
- Label okay_here;
- Handle<Object> undefined_sentinel(
- masm->isolate()->factory()->undefined_value());
- Handle<Map> global_property_cell_map(
- masm->isolate()->heap()->global_property_cell_map());
- __ Cmp(rbx, undefined_sentinel);
- __ j(equal, &okay_here);
- __ Cmp(FieldOperand(rbx, 0), global_property_cell_map);
- __ Assert(equal, "Expected property cell in register rbx");
- __ bind(&okay_here);
- }
- }
-
- if (FLAG_optimize_constructed_arrays) {
- Label not_zero_case, not_one_case;
- __ testq(rax, rax);
- __ j(not_zero, &not_zero_case);
- ArrayNoArgumentConstructorStub no_argument_stub;
- __ TailCallStub(&no_argument_stub);
-
- __ bind(&not_zero_case);
- __ cmpq(rax, Immediate(1));
- __ j(greater, &not_one_case);
- ArraySingleArgumentConstructorStub single_argument_stub;
- __ TailCallStub(&single_argument_stub);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub n_argument_stub;
- __ TailCallStub(&n_argument_stub);
- } else {
- Label generic_constructor;
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
-
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : number of arguments
- // -- rdi : constructor function
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1);
-
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
- __ cmpq(rdi, rcx);
- __ Assert(equal, "Unexpected String function");
- }
-
- // Load the first argument into rax and get rid of the rest
- // (including the receiver).
- Label no_arguments;
- __ testq(rax, rax);
- __ j(zero, &no_arguments);
- __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
- __ push(rcx);
- __ movq(rax, rbx);
-
- // Look up the argument in the number to string cache.
- Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- rax, // Input.
- rbx, // Result.
- rcx, // Scratch 1.
- rdx, // Scratch 2.
- false, // Input is known to be smi?
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1);
- __ bind(&argument_is_string);
-
- // ----------- S t a t e -------------
- // -- rbx : argument converted to string
- // -- rdi : constructor function
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Allocate a JSValue and put the tagged pointer into rax.
- Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- rax, // Result.
- rcx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
-
- // Set the map.
- __ LoadGlobalFunctionInitialMap(rdi, rcx);
- if (FLAG_debug_code) {
- __ cmpb(FieldOperand(rcx, Map::kInstanceSizeOffset),
- Immediate(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(equal, "Unexpected string wrapper instance size");
- __ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
- __ Assert(equal, "Unexpected unused properties of string wrapper");
- }
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
-
- // Set properties and elements.
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
-
- // Set the value.
- __ movq(FieldOperand(rax, JSValue::kValueOffset), rbx);
-
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-
- // We're done. Return.
- __ ret(0);
-
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rax, &convert_argument);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rcx);
- __ j(NegateCondition(is_string), &convert_argument);
- __ movq(rbx, rax);
- __ IncrementCounter(counters->string_ctor_string_value(), 1);
- __ jmp(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into rbx.
- __ bind(&convert_argument);
- __ IncrementCounter(counters->string_ctor_conversions(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdi); // Preserve the function.
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(rdi);
- }
- __ movq(rbx, rax);
- __ jmp(&argument_is_string);
-
- // Load the empty string into rbx, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ LoadRoot(rbx, Heap::kempty_stringRootIndex);
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, kPointerSize));
- __ push(rcx);
- __ jmp(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rbx);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
- }
- __ ret(0);
-}
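The stores in Generate_StringConstructCode initialize exactly four pointer-sized fields (map, properties, elements and the wrapped value), which is why the stub can assert JSValue::kSize == 4 * kPointerSize. A hedged sketch of that layout; the struct and its field names are mine:

#include <cstdint>

struct StringWrapperLayout {
  uint64_t map;         // initial map taken from the String function
  uint64_t properties;  // empty fixed array
  uint64_t elements;    // empty fixed array
  uint64_t value;       // the argument, already converted to a string
};

static_assert(sizeof(StringWrapperLayout) == 4 * sizeof(uint64_t),
              "mirrors STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize)");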
-
-
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(rbp);
- __ movq(rbp, rsp);
-
- // Store the arguments adaptor context sentinel.
- __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-
- // Push the function on the stack.
- __ push(rdi);
-
- // Preserve the number of arguments on the stack. Must preserve rax,
- // rbx and rcx because these registers are used when copying the
- // arguments and the receiver.
- __ Integer32ToSmi(r8, rax);
- __ push(r8);
-}
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
- // Retrieve the number of arguments from the stack. Number is a Smi.
- __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Leave the frame.
- __ movq(rsp, rbp);
- __ pop(rbp);
-
- // Remove caller arguments from the stack.
- __ pop(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ push(rcx);
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : actual number of arguments
- // -- rbx : expected number of arguments
- // -- rcx : call kind information
- // -- rdx : code entry to call
- // -----------------------------------
-
- Label invoke, dont_adapt_arguments;
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->arguments_adaptors(), 1);
-
- Label enough, too_few;
- __ cmpq(rax, rbx);
- __ j(less, &too_few);
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ j(equal, &dont_adapt_arguments);
-
- { // Enough parameters: Actual >= expected.
- __ bind(&enough);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all expected arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
- __ Set(r8, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incq(r8);
- __ push(Operand(rax, 0));
- __ subq(rax, Immediate(kPointerSize));
- __ cmpq(r8, rbx);
- __ j(less, &copy);
- __ jmp(&invoke);
- }
-
- { // Too few parameters: Actual < expected.
- __ bind(&too_few);
- EnterArgumentsAdaptorFrame(masm);
-
- // Copy receiver and all actual arguments.
- const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
- __ Set(r8, -1); // account for receiver
-
- Label copy;
- __ bind(&copy);
- __ incq(r8);
- __ push(Operand(rdi, 0));
- __ subq(rdi, Immediate(kPointerSize));
- __ cmpq(r8, rax);
- __ j(less, &copy);
-
- // Fill remaining expected arguments with undefined values.
- Label fill;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ bind(&fill);
- __ incq(r8);
- __ push(kScratchRegister);
- __ cmpq(r8, rbx);
- __ j(less, &fill);
-
- // Restore function pointer.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
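Taken together, the two copy loops implement a simple policy: copy the receiver plus as many actual arguments as the callee expects, and pad with undefined when too few were supplied (surplus arguments simply stay in the caller's frame). A compact, hypothetical C++ equivalent:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<uint64_t> AdaptArguments(const std::vector<uint64_t>& actual,
                                     std::size_t expected,
                                     uint64_t undefined_value) {
  std::vector<uint64_t> adapted;
  adapted.reserve(expected);
  // Copy up to `expected` actual arguments.
  std::size_t copied = std::min(actual.size(), expected);
  adapted.insert(adapted.end(), actual.begin(), actual.begin() + copied);
  // Pad missing arguments with undefined, as the fill loop above does.
  adapted.resize(expected, undefined_value);
  return adapted;
}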
-
- // Call the entry point.
- __ bind(&invoke);
- __ call(rdx);
-
- // Store offset of return address for deoptimizer.
- masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
- // Leave frame and return.
- LeaveArgumentsAdaptorFrame(masm);
- __ ret(0);
-
- // -------------------------------------------
- // Don't adapt arguments.
- // -------------------------------------------
- __ bind(&dont_adapt_arguments);
- __ jmp(rdx);
-}
-
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Get the loop depth of the stack guard check. This is recorded in
- // a test(rax, depth) instruction right after the call.
- Label stack_check;
- __ movq(rbx, Operand(rsp, 0)); // return address
- __ movzxbq(rbx, Operand(rbx, 1)); // depth
-
- // Get the loop nesting level at which we allow OSR from the
- // unoptimized code and check if we want to do OSR yet. If not we
- // should perform a stack guard check so we can get interrupts while
- // waiting for on-stack replacement.
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
- __ cmpb(rbx, FieldOperand(rcx, Code::kAllowOSRAtLoopNestingLevelOffset));
- __ j(greater, &stack_check);
-
- // Pass the function to optimize as the argument to the on-stack
- // replacement runtime function.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
- }
-
- // If the result was -1 it means that we couldn't optimize the
- // function. Just return and continue in the unoptimized version.
- Label skip;
- __ SmiCompare(rax, Smi::FromInt(-1));
- __ j(not_equal, &skip, Label::kNear);
- __ ret(0);
-
- // If we decide not to perform on-stack replacement we perform a
- // stack guard check to enable interrupts.
- __ bind(&stack_check);
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
-
- StackCheckStub stub;
- __ TailCallStub(&stub);
- if (FLAG_debug_code) {
- __ Abort("Unreachable code: returned from tail call.");
- }
- __ bind(&ok);
- __ ret(0);
-
- __ bind(&skip);
- // Untag the AST id and push it on the stack.
- __ SmiToInteger32(rax, rax);
- __ push(rax);
-
- // Generate the code for doing the frame-to-frame translation using
- // the deoptimizer infrastructure.
- Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
- generator.Generate();
-}
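The entry check reads the loop depth that the unoptimized code encodes in the byte right after the call instruction and compares it against the nesting level at which OSR is allowed. A small sketch of that test, assuming (as the comments above state) that the depth really is the byte at return_address + 1:

#include <cstdint>

bool ShouldAttemptOsr(const uint8_t* return_address,
                      uint8_t allowed_loop_nesting_level) {
  uint8_t loop_depth = return_address[1];  // byte embedded after the call
  // The j(greater, &stack_check) above means depths beyond the allowed level
  // only perform a stack-guard check instead of on-stack replacement.
  return loop_depth <= allowed_loop_nesting_level;
}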
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.cc b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
deleted file mode 100644
index c4dd865..0000000
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.cc
+++ /dev/null
@@ -1,6940 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "regexp-macro-assembler.h"
-#include "stub-cache.h"
-#include "runtime.h"
-
-namespace v8 {
-namespace internal {
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rax, rbx, rcx, rdx };
- descriptor->register_param_count_ = 4;
- descriptor->register_params_ = registers;
- descriptor->stack_parameter_count_ = NULL;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rdx, rax };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rax, rbx };
- descriptor->register_param_count_ = 2;
- descriptor->register_params_ = registers;
- descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-}
-
-
-static void InitializeArrayConstructorDescriptor(Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- // register state
- // rdi -- constructor function
- // rbx -- type info cell with elements kind
- // rax -- number of arguments to the constructor function
- static Register registers[] = { rdi, rbx };
- descriptor->register_param_count_ = 2;
- // Stack parameter count: the constructor pointer and a single argument.
- descriptor->stack_parameter_count_ = &rax;
- descriptor->register_params_ = registers;
- descriptor->extra_expression_stack_count_ = 1;
- descriptor->deoptimization_handler_ =
- FUNCTION_ADDR(ArrayConstructor_StubFailure);
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
- Isolate* isolate,
- CodeStubInterfaceDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate, descriptor);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in rax.
- Label check_heap_number, call_builtin;
- __ SmiTest(rax);
- __ j(not_zero, &check_heap_number, Label::kNear);
- __ Ret();
-
- __ bind(&check_heap_number);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_builtin, Label::kNear);
- __ Ret();
-
- __ bind(&call_builtin);
- __ pop(rcx); // Pop return address.
- __ push(rax);
- __ push(rcx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
-}
-
-
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
- // Create a new closure from the given function info in new
- // space. Set the context to the current context in rsi.
- Counters* counters = masm->isolate()->counters();
-
- Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
-
- __ IncrementCounter(counters->fast_new_closure_total(), 1);
-
- // Get the function info from the stack.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
-
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
- __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
- __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
-
- // Initialize the rest of the function. We don't have to update the
- // write barrier because the allocated object is in new space.
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
- __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
- __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
- __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
- __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
-
- // Initialize the code pointer in the function to be the one
- // found in the shared function info object.
- // But first check if there is an optimized version for our context.
- Label check_optimized;
- Label install_unoptimized;
- if (FLAG_cache_optimized_code) {
- __ movq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ testq(rbx, rbx);
- __ j(not_zero, &check_optimized, Label::kNear);
- }
- __ bind(&install_unoptimized);
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
- rdi); // Initialize with undefined.
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&check_optimized);
-
- __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
-
- // rcx holds native context, rbx points to fixed array of 3-element entries
- // (native context, optimized code, literals).
- // The optimized code map must never be empty, so check the first entry.
- Label install_optimized;
- // Speculatively move code object into rdx.
- __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize));
- __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize));
- __ j(equal, &install_optimized);
-
- // Iterate through the rest of the map backwards. rdx holds an index.
- Label loop;
- Label restore;
- __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ SmiToInteger32(rdx, rdx);
- __ bind(&loop);
- // Do not double check first entry.
- __ cmpq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
- __ j(equal, &restore);
- __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength)); // Skip an entry.
- __ cmpq(rcx, FieldOperand(rbx,
- rdx,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, &loop, Label::kNear);
- // Hit: fetch the optimized code.
- __ movq(rdx, FieldOperand(rbx,
- rdx,
- times_pointer_size,
- FixedArray::kHeaderSize + 1 * kPointerSize));
-
- __ bind(&install_optimized);
- __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
-
- // TODO(fschneider): Idea: store proper code pointers in the map and either
- // unmangle them on marking or do nothing as the whole map is discarded on
- // major GC anyway.
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
- __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
-
- // Now link a function into a list of optimized functions.
- __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
-
- __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
- // No need for write barrier as JSFunction (rax) is in the new space.
-
- __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
- // Store JSFunction (rax) into rdx before issuing write barrier as
- // it clobbers all the registers passed.
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx,
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
- rdx,
- rbx,
- kDontSaveFPRegs);
-
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
-
- __ bind(&restore);
- __ movq(rdx, Operand(rsp, 1 * kPointerSize));
- __ jmp(&install_unoptimized);
-
- // Create a new closure through the slower runtime call.
- __ bind(&gc);
- __ pop(rcx); // Temporarily remove return address.
- __ pop(rdx);
- __ push(rsi);
- __ push(rdx);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ push(rcx); // Restore return address.
- __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
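Behind the register juggling, the optimized-code-map lookup in FastNewClosureStub is a backwards scan over a flat array of (native context, code, literals) triples, looking for an entry whose context matches the current native context. A rough C++ rendering with types and names of my own invention:

#include <cstddef>
#include <vector>

struct OptimizedCodeEntry {
  const void* native_context;
  const void* code;
  const void* literals;
};

const void* FindOptimizedCode(const std::vector<OptimizedCodeEntry>& map,
                              const void* native_context) {
  for (std::size_t i = map.size(); i-- > 0;) {
    if (map[i].native_context == native_context) {
      return map[i].code;  // hit: install this code entry in the new closure
    }
  }
  return nullptr;  // miss: fall back to the shared, unoptimized code
}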
-
-
-void FastNewContextStub::Generate(MacroAssembler* masm) {
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // Set up the fixed slots.
- __ Set(rbx, 0); // Set to NULL.
- __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
- __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
-
- // Copy the global object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
-
- // Copy the qmlglobal object from the previous context.
- __ movq(rbx,
- Operand(rsi, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)),
- rbx);
-
- // Initialize the rest of the slots to undefined.
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
- __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(1 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
-}
-
-
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + (1 * kPointerSize)]: function
- // [rsp + (2 * kPointerSize)]: serialized scope info
-
- // Try to allocate the context in new space.
- Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- rax, rbx, rcx, &gc, TAG_OBJECT);
-
- // Get the function from the stack.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
- // Get the serialized scope info from the stack.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
-
- // Set up the object header.
- __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
- // If this block context is nested in the native context we get a smi
- // sentinel instead of a function. The block context should get the
- // canonical empty function of the native context as its closure which
- // we still have to look up.
- Label after_sentinel;
- __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
- if (FLAG_debug_code) {
- const char* message = "Expected 0 as a Smi sentinel";
- __ cmpq(rcx, Immediate(0));
- __ Assert(equal, message);
- }
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
- __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
- __ bind(&after_sentinel);
-
- // Set up the fixed slots.
- __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
- __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
- __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
-
- // Copy the global object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(ContextOperand(rax, Context::GLOBAL_OBJECT_INDEX), rbx);
-
- // Copy the qmlglobal object from the previous context.
- __ movq(rbx, ContextOperand(rsi, Context::QML_GLOBAL_OBJECT_INDEX));
- __ movq(ContextOperand(rax, Context::QML_GLOBAL_OBJECT_INDEX), rbx);
-
- // Initialize the rest of the slots to the hole value.
- __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
- for (int i = 0; i < slots_; i++) {
- __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
- }
-
- // Return and remove the on-stack parameter.
- __ movq(rsi, rax);
- __ ret(2 * kPointerSize);
-
- // Need to collect. Call into runtime system.
- __ bind(&gc);
- __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- AllocationSiteMode allocation_site_mode,
- Label* fail) {
- // Registers on entry:
- //
- // rcx: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- int size = JSArray::kSize;
- int allocation_info_start = size;
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- size += AllocationSiteInfo::kSize;
- }
- size += elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- AllocationFlags flags = TAG_OBJECT;
- if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
- flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
- }
- __ AllocateInNewSpace(size, rax, rbx, rdx, fail, flags);
-
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ LoadRoot(kScratchRegister, Heap::kAllocationSiteInfoMapRootIndex);
- __ movq(FieldOperand(rax, allocation_info_start), kScratchRegister);
- __ movq(FieldOperand(rax, allocation_info_start + kPointerSize), rcx);
- }
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ lea(rdx, Operand(rax, JSArray::kSize + AllocationSiteInfo::kSize));
- } else {
- __ lea(rdx, Operand(rax, JSArray::kSize));
- }
- __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
-
- // Copy the elements array.
- if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
- } else {
- ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
- int i;
- for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
- while (i < elements_size) {
- __ movsd(xmm0, FieldOperand(rcx, i));
- __ movsd(FieldOperand(rdx, i), xmm0);
- i += kDoubleSize;
- }
- ASSERT(i == elements_size);
- }
- }
-}
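GenerateFastCloneShallowArrayCommon performs a single new-space allocation sized for the JSArray header, an optional allocation-site info record, and the elements store, then copies the boilerplate into it. The size computation, restated as plain C++ with illustrative parameters rather than the real constants:

#include <cstddef>

std::size_t ShallowCloneAllocationSize(std::size_t js_array_size,
                                       std::size_t allocation_site_info_size,
                                       bool track_allocation_site,
                                       std::size_t elements_size) {
  std::size_t size = js_array_size;        // JSArray::kSize
  if (track_allocation_site) {
    size += allocation_site_info_size;     // AllocationSiteInfo::kSize
  }
  return size + elements_size;             // Fixed(Double)Array::SizeFor(length)
}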
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + kPointerSize]: constant elements.
- // [rsp + (2 * kPointerSize)]: literal index.
- // [rsp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into rcx and check if we need to create a
- // boilerplate.
- __ movq(rcx, Operand(rsp, 3 * kPointerSize));
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rcx,
- FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- Label slow_case;
- __ j(equal, &slow_case);
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- // rcx is boilerplate object.
- Factory* factory = masm->isolate()->factory();
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- factory->fixed_cow_array_map());
- __ j(not_equal, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&check_fast_elements);
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- factory->fixed_array_map());
- __ j(not_equal, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(rcx);
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- expected_map_index);
- __ Assert(equal, message);
- __ pop(rcx);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
-// The stub expects its argument on the stack and returns its result in tos_:
-// zero for false, and a non-zero value for true.
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- Label patch;
- const Register argument = rax;
- const Register map = rdx;
-
- if (!types_.IsEmpty()) {
- __ movq(argument, Operand(rsp, 1 * kPointerSize));
- }
-
- // undefined -> false
- CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
-
- // Boolean -> its value
- CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
- CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
-
- // 'null' -> false.
- CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
-
- if (types_.Contains(SMI)) {
- // Smis: 0 -> false, all other -> true
- Label not_smi;
- __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
- // argument contains the correct return value already
- if (!tos_.is(argument)) {
- __ movq(tos_, argument);
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_smi);
- } else if (types_.NeedsMap()) {
- // If we need a map later and have a Smi -> patch.
- __ JumpIfSmi(argument, &patch, Label::kNear);
- }
-
- if (types_.NeedsMap()) {
- __ movq(map, FieldOperand(argument, HeapObject::kMapOffset));
-
- if (types_.CanBeUndetectable()) {
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- // Undetectable -> false.
- Label not_undetectable;
- __ j(zero, &not_undetectable, Label::kNear);
- __ Set(tos_, 0);
- __ ret(1 * kPointerSize);
- __ bind(&not_undetectable);
- }
- }
-
- if (types_.Contains(SPEC_OBJECT)) {
- // spec object -> true.
- Label not_js_object;
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, &not_js_object, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, 1);
- }
- __ ret(1 * kPointerSize);
- __ bind(&not_js_object);
- }
-
- if (types_.Contains(STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ movq(tos_, FieldOperand(argument, String::kLengthOffset));
- __ ret(1 * kPointerSize); // the string length is OK as the return value
- __ bind(&not_string);
- }
-
- if (types_.Contains(HEAP_NUMBER)) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number, false_result;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(argument, HeapNumber::kValueOffset));
- __ j(zero, &false_result, Label::kNear);
- // argument contains the correct return value already.
- if (!tos_.is(argument)) {
- __ Set(tos_, 1);
- }
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ Set(tos_, 0);
- __ ret(1 * kPointerSize);
- __ bind(&not_heap_number);
- }
-
- __ bind(&patch);
- GenerateTypeTransition(masm);
-}
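The per-type cases of ToBooleanStub add up to the usual ECMAScript ToBoolean rules. The two cases it open-codes with actual value inspection can be summarized in portable C++ (stand-in helpers, not V8 code):

#include <cmath>
#include <string>

// Heap numbers are false exactly for +0, -0 and NaN, which is what the
// ucomisd against zero plus j(zero) checks above.
bool HeapNumberToBoolean(double value) {
  return value != 0.0 && !std::isnan(value);
}

// Strings are false iff empty; the stub simply returns the smi length,
// since any non-zero value counts as true.
bool StringToBoolean(const std::string& value) {
  return !value.empty();
}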
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- __ PushCallerSaved(save_doubles_);
- const int argument_count = 1;
- __ PrepareCallCFunction(argument_count);
-#ifdef _WIN64
- __ LoadAddress(rcx, ExternalReference::isolate_address());
-#else
- __ LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::store_buffer_overflow_function(masm->isolate()),
- argument_count);
- __ PopCallerSaved(save_doubles_);
- __ ret(0);
-}
-
-
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
- Type type,
- Heap::RootListIndex value,
- bool result) {
- const Register argument = rax;
- if (types_.Contains(type)) {
- // If we see an expected oddball, return its ToBoolean value tos_.
- Label different_value;
- __ CompareRoot(argument, value);
- __ j(not_equal, &different_value, Label::kNear);
- if (!result) {
- // If we have to return zero, there is no way around clearing tos_.
- __ Set(tos_, 0);
- } else if (!tos_.is(argument)) {
- // If we have to return non-zero, we can re-use the argument if it is the
- // same register as the result, because we never see Smi-zero here.
- __ Set(tos_, 1);
- }
- __ ret(1 * kPointerSize);
- __ bind(&different_value);
- }
-}
-
-
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Get return address, operand is now on top of stack.
- __ Push(Smi::FromInt(tos_.code()));
- __ Push(Smi::FromInt(types_.ToByte()));
- __ push(rcx); // Push return address.
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
- 3,
- 1);
-}
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- enum ConvertUndefined {
- CONVERT_UNDEFINED_TO_ZERO,
- BAILOUT_ON_UNDEFINED
- };
- // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
- // If the operands are not both numbers, jump to not_numbers.
- // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
- // NumberOperands assumes both are smis or heap numbers.
- static void LoadSSE2SmiOperands(MacroAssembler* masm);
- static void LoadSSE2NumberOperands(MacroAssembler* masm);
- static void LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers);
-
- // Takes the operands in rdx and rax and loads them as integers in rax
- // and rcx.
- static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure,
- Register heap_number_map);
- // As above, but we know the operands to be numbers. In that case,
- // conversion can't fail.
- static void LoadNumbersAsIntegers(MacroAssembler* masm);
-
- // Tries to convert two values to smis losslessly.
- // This fails if either argument is not a Smi nor a HeapNumber,
- // or if it's a HeapNumber with a value that can't be converted
- // losslessly to a Smi. In that case, control transitions to the
- // on_not_smis label.
- // On success, either control goes to the on_success label (if one is
- // provided), or it falls through at the end of the code (if on_success
- // is NULL).
- // On success, both first and second hold Smi tagged values.
- // One of first or second must be non-Smi when entering.
- static void NumbersToSmis(MacroAssembler* masm,
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* on_success,
- Label* on_not_smis,
- ConvertUndefined convert_undefined);
-};
-
-
-// Get the integer part of a heap number.
-// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
-void IntegerConvert(MacroAssembler* masm,
- Register result,
- Register source) {
- // Result may be rcx. If result and source are the same register, source will
- // be overwritten.
- ASSERT(!result.is(rdi) && !result.is(rbx));
- // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
- // cvttsd2si (32-bit version) directly.
- Register double_exponent = rbx;
- Register double_value = rdi;
- Label done, exponent_63_plus;
- // Get double and extract exponent.
- __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
- // Clear result preemptively, in case we need to return zero.
- __ xorl(result, result);
- __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there.
- // Double the value to remove the sign bit, shift the exponent down to the
- // least significant bits, and subtract the bias to get the unshifted,
- // unbiased exponent.
- __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
- __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
- __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
- // Check whether the exponent is too big for a 63 bit unsigned integer.
- __ cmpl(double_exponent, Immediate(63));
- __ j(above_equal, &exponent_63_plus, Label::kNear);
- // Handle exponent range 0..62.
- __ cvttsd2siq(result, xmm0);
- __ jmp(&done, Label::kNear);
-
- __ bind(&exponent_63_plus);
- // Exponent negative or 63+.
- __ cmpl(double_exponent, Immediate(83));
- // If exponent negative or above 83, number contains no significant bits in
- // the range 0..2^31, so the result is zero, and the result register
- // already holds zero.
- __ j(above, &done, Label::kNear);
-
- // Exponent in range 63..83.
- // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
- // the least significant exponent-52 bits.
-
- // Negate low bits of mantissa if value is negative.
- __ addq(double_value, double_value); // Move sign bit to carry.
- __ sbbl(result, result); // And convert carry to -1 in result register.
- // If double_value is negative, do (double_value-1)^-1, otherwise
- // (double_value-0)^0.
- __ addl(double_value, result);
- // Do xor in opposite directions depending on where we want the result
- // (depending on whether result is rcx or not).
-
- if (result.is(rcx)) {
- __ xorl(double_value, result);
- // Left shift mantissa by (exponent - mantissabits - 1) to save the
- // bits that have positional values below 2^32 (the extra -1 comes from the
- // doubling done above to move the sign bit into the carry flag).
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(double_value);
- __ movl(result, double_value);
- } else {
- // As the then-branch, but move double-value to result before shifting.
- __ xorl(result, double_value);
- __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
- __ shll_cl(result);
- }
-
- __ bind(&done);
-}
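-
-
-// Illustrative sketch only; IntegerConvertReference is a hypothetical helper
-// (relies on <cmath>), not part of this file. It approximates what
-// IntegerConvert computes on the host: the low 32 bits of the truncated
-// double, with non-finite and very large inputs collapsing to zero, matching
-// the exponent checks above.
-static int32_t IntegerConvertReference(double value) {
- if (!std::isfinite(value)) return 0; // NaN and +/-Infinity convert to 0.
- double truncated = std::trunc(value); // Drop the fractional part.
- double modulo = std::fmod(truncated, 4294967296.0); // Keep 32 bits' worth.
- if (modulo < 0) modulo += 4294967296.0;
- return static_cast<int32_t>(static_cast<uint32_t>(modulo));
-}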
-
-
-void UnaryOpStub::Generate(MacroAssembler* masm) {
- switch (operand_type_) {
- case UnaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case UnaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case UnaryOpIC::NUMBER:
- GenerateNumberStub(masm);
- break;
- case UnaryOpIC::GENERIC:
- GenerateGenericStub(masm);
- break;
- }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
-
- __ push(rax); // the operand
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(mode_));
- __ Push(Smi::FromInt(operand_type_));
-
- __ push(rcx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateSmiStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateSmiStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
- Label slow;
- GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
- Label non_smi;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow,
- Label::Distance non_smi_near,
- Label::Distance slow_near) {
- Label done;
- __ JumpIfNotSmi(rax, non_smi, non_smi_near);
- __ SmiNeg(rax, rax, &done, Label::kNear);
- __ jmp(slow, slow_near);
- __ bind(&done);
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near) {
- __ JumpIfNotSmi(rax, non_smi, non_smi_near);
- __ SmiNot(rax, rax);
- __ ret(0);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateNumberStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateNumberStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
- Label non_smi, slow, call_builtin;
- GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
- __ bind(&call_builtin);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(
- MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
- Label* slow) {
- // Check if the operand is a heap number.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, slow);
-
- // Operand is a float, negate its value by flipping the sign bit.
- if (mode_ == UNARY_OVERWRITE) {
- __ Set(kScratchRegister, 0x01);
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
- } else {
- // Allocate a heap number before calculating the answer,
- // so we don't have an untagged double around during GC.
- Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rcx, rax);
- __ pop(rax);
- }
- __ bind(&heapnumber_allocated);
- // rcx: allocated 'empty' number
-
- // Copy the double value to the new heap number, flipping the sign.
- __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Set(kScratchRegister, 0x01);
- __ shl(kScratchRegister, Immediate(63));
- __ xor_(rdx, kScratchRegister); // Flip sign.
- __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
- __ movq(rax, rcx);
- }
- __ ret(0);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
- Label* slow) {
- // Check if the operand is a heap number.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, slow);
-
- // Convert the heap number in rax to an untagged integer in rcx.
- IntegerConvert(masm, rax, rax);
-
- // Do the bitwise operation and smi tag the result.
- __ notl(rax);
- __ Integer32ToSmi(rax, rax);
- __ ret(0);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
- switch (op_) {
- case Token::SUB:
- GenerateGenericStubSub(masm);
- break;
- case Token::BIT_NOT:
- GenerateGenericStubBitNot(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeSub(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
- Label non_smi, slow;
- GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
- __ bind(&non_smi);
- GenerateHeapNumberCodeBitNot(masm, &slow);
- __ bind(&slow);
- GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
- // Handle the slow case by jumping to the JavaScript builtin.
- __ pop(rcx); // pop return address
- __ push(rax);
- __ push(rcx); // push return address
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void UnaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name = NULL; // Make g++ happy.
- switch (mode_) {
- case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
- }
- stream->Add("UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
-}
-
-
-void BinaryOpStub::Initialize() {}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- __ pop(rcx); // Save return address.
- __ push(rdx);
- __ push(rax);
- // Left and right arguments are now on top.
- __ Push(Smi::FromInt(MinorKey()));
-
- __ push(rcx); // Push return address.
-
- // Patch the caller to an appropriate specialized stub and return the
- // operation result to the caller of the stub.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
-
- // Arguments to BinaryOpStub are in rdx and rax.
- const Register left = rdx;
- const Register right = rax;
-
- // We only generate heapnumber answers for overflowing calculations
- // for the four basic arithmetic operations and logical right shift by 0.
- bool generate_inline_heapnumber_results =
- (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
- (op == Token::ADD || op == Token::SUB ||
- op == Token::MUL || op == Token::DIV || op == Token::SHR);
-
- // Smi check of both operands. If op is BIT_OR, the check is delayed
- // until after the OR operation.
- Label not_smis;
- Label use_fp_on_smis;
- Label fail;
-
- if (op != Token::BIT_OR) {
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
-
- Label smi_values;
- __ bind(&smi_values);
- // Perform the operation.
- Comment perform_smi(masm, "-- Perform smi operation");
- switch (op) {
- case Token::ADD:
- ASSERT(right.is(rax));
- __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
- break;
-
- case Token::SUB:
- __ SmiSub(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
-
- case Token::MUL:
- ASSERT(right.is(rax));
- __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
- break;
-
- case Token::DIV:
- // SmiDiv will not accept left in rdx or right in rax.
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
- break;
-
- case Token::MOD:
- // SmiMod will not accept left in rdx or right in rax.
- __ movq(rbx, rax);
- __ movq(rcx, rdx);
- __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
- break;
-
- case Token::BIT_OR: {
- ASSERT(right.is(rax));
- __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative.
- break;
- }
- case Token::BIT_XOR:
- ASSERT(right.is(rax));
- __ SmiXor(right, right, left); // BIT_XOR is commutative.
- break;
-
- case Token::BIT_AND:
- ASSERT(right.is(rax));
- __ SmiAnd(right, right, left); // BIT_AND is commutative.
- break;
-
- case Token::SHL:
- __ SmiShiftLeft(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SAR:
- __ SmiShiftArithmeticRight(left, left, right);
- __ movq(rax, left);
- break;
-
- case Token::SHR:
- __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
- __ movq(rax, left);
- break;
-
- default:
- UNREACHABLE();
- }
-
- // 5. Emit return of result in rax. Some operations have registers pushed.
- __ ret(0);
-
- if (use_fp_on_smis.is_linked()) {
- // 6. For some operations emit inline code to perform floating point
- // operations on known smis (e.g., if the result of the operation
- // overflowed the smi range).
- __ bind(&use_fp_on_smis);
- if (op == Token::DIV || op == Token::MOD) {
- // Restore left and right to rdx and rax.
- __ movq(rdx, rcx);
- __ movq(rax, rbx);
- }
-
- if (generate_inline_heapnumber_results) {
- __ AllocateHeapNumber(rcx, rbx, slow);
- Comment perform_float(masm, "-- Perform float operation on smis");
- if (op == Token::SHR) {
- __ SmiToInteger32(left, left);
- __ cvtqsi2sd(xmm0, left);
- } else {
- FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- }
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ jmp(&fail);
- }
- }
-
- // 7. Non-smi operands reach the end of the code generated by
- // GenerateSmiCode, and fall through to subsequent code,
- // with the operands in rdx and rax.
- // But first we check if non-smi values are HeapNumbers holding
- // values that could be smi.
- __ bind(&not_smis);
- Comment done_comment(masm, "-- Enter non-smi code");
- FloatingPointHelper::ConvertUndefined convert_undefined =
- FloatingPointHelper::BAILOUT_ON_UNDEFINED;
- // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
- if (op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR ||
- op == Token::SAR ||
- op == Token::SHL ||
- op == Token::SHR) {
- convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
- }
- FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
- &smi_values, &fail, convert_undefined);
- __ jmp(&smi_values);
- __ bind(&fail);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure,
- Token::Value op,
- OverwriteMode mode) {
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
-
- switch (op) {
- case Token::ADD: __ addsd(xmm0, xmm1); break;
- case Token::SUB: __ subsd(xmm0, xmm1); break;
- case Token::MUL: __ mulsd(xmm0, xmm1); break;
- case Token::DIV: __ divsd(xmm0, xmm1); break;
- default: UNREACHABLE();
- }
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, allocation_failure, mode);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
- break;
- }
- case Token::MOD: {
- // For MOD we jump to the allocation_failure label, to call runtime.
- __ jmp(allocation_failure);
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- Label non_smi_shr_result;
- Register heap_number_map = r9;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
- heap_number_map);
- switch (op) {
- case Token::BIT_OR: __ orl(rax, rcx); break;
- case Token::BIT_AND: __ andl(rax, rcx); break;
- case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl_cl(rax); break;
- case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: {
- __ shrl_cl(rax);
- // Check if result is negative. This can only happen for a shift
- // by zero.
- __ testl(rax, rax);
- __ j(negative, &non_smi_shr_result);
- break;
- }
- default: UNREACHABLE();
- }
- STATIC_ASSERT(kSmiValueSize == 32);
- // Tag smi result and return.
- __ Integer32ToSmi(rax, rax);
- __ Ret();
-
- // Logical shift right can produce an unsigned int32 that is not
- // an int32, and so is not in the smi range. Allocate a heap number
- // in that case.
- if (op == Token::SHR) {
- __ bind(&non_smi_shr_result);
- Label allocation_failed;
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
- // Allocate heap number in new space.
- // Not using AllocateHeapNumber macro in order to reuse
- // already loaded heap_number_map.
- __ AllocateInNewSpace(HeapNumber::kSize,
- rax,
- rdx,
- no_reg,
- &allocation_failed,
- TAG_OBJECT);
- // Set the map.
- __ AssertRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- __ movq(FieldOperand(rax, HeapObject::kMapOffset),
- heap_number_map);
- __ cvtqsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
- __ Ret();
-
- __ bind(&allocation_failed);
- // We need tagged values in rdx and rax for the following code,
- // not int32 in rax and rcx.
- __ Integer32ToSmi(rax, rcx);
- __ Integer32ToSmi(rdx, rbx);
- __ jmp(allocation_failure);
- }
- break;
- }
- default: UNREACHABLE(); break;
- }
- // No fall-through from this generated code.
- if (FLAG_debug_code) {
- __ Abort("Unexpected fall-through in "
- "BinaryOpStub_GenerateFloatingPointCode.");
- }
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &left_not_string, Label::kNear);
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime, Label::kNear);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime, Label::kNear);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label call_runtime;
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- GenerateTypeTransition(masm);
-
- if (call_runtime.is_linked()) {
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- // The int32 case is identical to the Smi case. We avoid creating this
- // ic state on x64.
- UNREACHABLE();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &call_runtime);
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(rdx, rdx);
- } else {
- __ LoadRoot(rdx, Heap::kNanValueRootIndex);
- }
- __ jmp(&done, Label::kNear);
- __ bind(&check);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- if (Token::IsBitOp(op_)) {
- __ xor_(rax, rax);
- } else {
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
- Register input,
- Label* fail) {
- Label ok;
- __ JumpIfSmi(input, &ok, Label::kNear);
- Register heap_number_map = r8;
- Register scratch1 = r9;
- Register scratch2 = r10;
- // HeapNumbers containing 32bit integer values are also allowed.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, fail);
- __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
- // Convert, convert back, and compare the two doubles' bits.
- __ cvttsd2siq(scratch2, xmm0);
- __ cvtlsi2sd(xmm1, scratch2);
- __ movq(scratch1, xmm0);
- __ movq(scratch2, xmm1);
- __ cmpq(scratch1, scratch2);
- __ j(not_equal, fail);
- __ bind(&ok);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label gc_required, not_number;
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
- }
-
- BinaryOpStub_GenerateFloatingPointCode(
- masm, &gc_required, &not_number, op_, mode_);
-
- __ bind(&not_number);
- GenerateTypeTransition(masm);
-
- __ bind(&gc_required);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
- BinaryOpStub_GenerateFloatingPointCode(
- masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
- Label skip_allocation;
- switch (mode) {
- case OVERWRITE_LEFT: {
- // If the argument in rdx is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rdx, &skip_allocation);
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rdx can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ movq(rdx, rbx);
- __ bind(&skip_allocation);
- // Use object in rdx as a result holder
- __ movq(rax, rdx);
- break;
- }
- case OVERWRITE_RIGHT:
- // If the argument in rax is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(rax, &skip_allocation);
- // Fall through!
- case NO_OVERWRITE:
- // Allocate a heap number for the result. Keep rax and rdx intact
- // for the possible runtime call.
- __ AllocateHeapNumber(rbx, rcx, alloc_failure);
- // Now rax can be overwritten losing one of the arguments as we are
- // now done and will not need it any more.
- __ movq(rax, rbx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
- __ push(rcx);
-}
-
-
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // TAGGED case:
- // Input:
- // rsp[8]: argument (should be number).
- // rsp[0]: return address.
- // Output:
- // rax: tagged double result.
- // UNTAGGED case:
- // Input:
- // rsp[0]: return address.
- // xmm1: untagged double input argument
- // Output:
- // xmm1: untagged double result.
-
- Label runtime_call;
- Label runtime_call_clear_stack;
- Label skip_cache;
- const bool tagged = (argument_type_ == TAGGED);
- if (tagged) {
- Label input_not_smi, loaded;
- // Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kDoubleSize));
- __ cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&loaded, Label::kNear);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
-
- __ bind(&loaded);
- } else { // UNTAGGED.
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- }
-
- // ST[0] == double value, if TAGGED.
- // rbx = bits of double value.
- // rdx = also bits of double value.
- // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
- // h = h0 = bits ^ (bits >> 32);
- // h ^= h >> 16;
- // h ^= h >> 8;
- // h = h & (cacheSize - 1);
- // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
- __ sar(rdx, Immediate(32));
- __ xorl(rdx, rbx);
- __ movl(rcx, rdx);
- __ movl(rax, rdx);
- __ movl(rdi, rdx);
- __ sarl(rdx, Immediate(8));
- __ sarl(rcx, Immediate(16));
- __ sarl(rax, Immediate(24));
- __ xorl(rcx, rdx);
- __ xorl(rax, rdi);
- __ xorl(rcx, rax);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // ST[0] == double value.
- // rbx = bits of double value.
- // rcx = TranscendentalCache::hash(double value).
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(masm->isolate());
- __ movq(rax, cache_array);
- int cache_array_index =
- type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
- __ movq(rax, Operand(rax, cache_array_index));
- // rax points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ testq(rax, rax);
- __ j(zero, &runtime_call_clear_stack); // Only clears stack if TAGGED.
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { // NOLINT - doesn't like a single brace on a line.
- TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- // Two uint_32's and a pointer per element.
- CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
- CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
- CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
- CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
- }
-#endif
- // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
- __ addl(rcx, rcx);
- __ lea(rcx, Operand(rax, rcx, times_8, 0));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- Label cache_miss;
- __ cmpq(rbx, Operand(rcx, 0));
- __ j(not_equal, &cache_miss, Label::kNear);
- // Cache hit!
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->transcendental_cache_hit(), 1);
- __ movq(rax, Operand(rcx, 2 * kIntSize));
- if (tagged) {
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
- }
-
- __ bind(&cache_miss);
- __ IncrementCounter(counters->transcendental_cache_miss(), 1);
- // Update cache with new value.
- if (tagged) {
- __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
- } else { // UNTAGGED.
- __ AllocateHeapNumber(rax, rdi, &skip_cache);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- }
- GenerateOperation(masm, type_);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- if (tagged) {
- __ ret(kPointerSize);
- } else { // UNTAGGED.
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
-
- // Skip cache and return answer directly, only in untagged case.
- __ bind(&skip_cache);
- __ subq(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), xmm1);
- __ fld_d(Operand(rsp, 0));
- GenerateOperation(masm, type_);
- __ fstp_d(Operand(rsp, 0));
- __ movsd(xmm1, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- // We return the value in xmm1 without adding it to the cache, but
- // we cause a scavenging GC so that future allocations will succeed.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Allocate an unused object bigger than a HeapNumber.
- __ Push(Smi::FromInt(2 * kDoubleSize));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
- }
- __ Ret();
- }
-
- // Call runtime, doing whatever allocation and cleanup is necessary.
- if (tagged) {
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(
- ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
- } else { // UNTAGGED.
- __ bind(&runtime_call_clear_stack);
- __ bind(&runtime_call);
- __ AllocateHeapNumber(rax, rdi, &skip_cache);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rax);
- __ CallRuntime(RuntimeFunction(), 1);
- }
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ Ret();
- }
-}
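-
-
-// Illustrative sketch only; TranscendentalCacheHash is a hypothetical helper
-// (relies on <string.h> for memcpy), not part of this file. It restates the
-// cache hash computed inline above: fold the 64 bits of the double into 32,
-// mix with 16- and 8-bit shifts, and mask to the power-of-two cache size.
-static uint32_t TranscendentalCacheHash(double input, uint32_t cache_size) {
- uint64_t bits;
- memcpy(&bits, &input, sizeof(bits)); // Reinterpret the double as raw bits.
- uint32_t h = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
- h ^= h >> 16;
- h ^= h >> 8;
- return h & (cache_size - 1); // cache_size must be a power of two.
-}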
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- case TranscendentalCache::TAN: return Runtime::kMath_tan;
- case TranscendentalCache::LOG: return Runtime::kMath_log;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(
- MacroAssembler* masm, TranscendentalCache::Type type) {
- // Registers:
- // rax: Newly allocated HeapNumber, which must be preserved.
- // rbx: Bits of input double. Must be preserved.
- // rcx: Pointer to cache entry. Must be preserved.
- // st(0): Input double
- Label done;
- if (type == TranscendentalCache::SIN ||
- type == TranscendentalCache::COS ||
- type == TranscendentalCache::TAN) {
- // Both fsin and fcos require arguments in the range +/-2^63 and
- // return NaN for infinities and NaN. They can share all code except
- // the actual fsin/fcos operation.
- Label in_range;
- // If argument is outside the range -2^63..2^63, fsin/cos doesn't
- // work. We must reduce it to the appropriate range.
- __ movq(rdi, rbx);
- // Move exponent and sign bits to low bits.
- __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
- // Remove sign bit.
- __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
- int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
- __ cmpl(rdi, Immediate(supported_exponent_limit));
- __ j(below, &in_range);
- // Check for infinity and NaN. Both return NaN for sin.
- __ cmpl(rdi, Immediate(0x7ff));
- Label non_nan_result;
- __ j(not_equal, &non_nan_result, Label::kNear);
- // Input is +/-Infinity or NaN. Result is NaN.
- __ fstp(0);
- // NaN is represented by 0x7ff8000000000000.
- __ subq(rsp, Immediate(kPointerSize));
- __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
- __ movl(Operand(rsp, 0), Immediate(0x00000000));
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&done);
-
- __ bind(&non_nan_result);
-
- // Use fprem1 to restrict argument to the range +/-2*PI.
- __ movq(rdi, rax); // Save rax before using fnstsw_ax.
- __ fldpi();
- __ fadd(0);
- __ fld(1);
- // FPU Stack: input, 2*pi, input.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word.
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem1();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400)); // Check C2 bit of FPU status word.
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
- // FPU Stack: input, 2*pi, input % 2*pi
- __ fstp(2);
- // FPU Stack: input % 2*pi, 2*pi,
- __ fstp(0);
- // FPU Stack: input % 2*pi
- __ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber.
- __ bind(&in_range);
- switch (type) {
- case TranscendentalCache::SIN:
- __ fsin();
- break;
- case TranscendentalCache::COS:
- __ fcos();
- break;
- case TranscendentalCache::TAN:
- // FPTAN replaces st(0) with its tangent and pushes 1.0 onto the
- // FP register stack.
- __ fptan();
- __ fstp(0); // Pop FP register stack.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&done);
- } else {
- ASSERT(type == TranscendentalCache::LOG);
- __ fldln2();
- __ fxch();
- __ fyl2x();
- }
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
- // Check float operands.
- Label done;
- Label rax_is_smi;
- Label rax_is_object;
- Label rdx_is_object;
-
- __ JumpIfNotSmi(rdx, &rdx_is_object);
- __ SmiToInteger32(rdx, rdx);
- __ JumpIfSmi(rax, &rax_is_smi);
-
- __ bind(&rax_is_object);
- IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
- __ jmp(&done);
-
- __ bind(&rdx_is_object);
- IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
- __ JumpIfNotSmi(rax, &rax_is_object);
- __ bind(&rax_is_smi);
- __ SmiToInteger32(rcx, rax);
-
- __ bind(&done);
- __ movl(rax, rdx);
-}
-
-
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-// Jump to conversion_failure: rdx and rax are unchanged.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure,
- Register heap_number_map) {
- // Check float operands.
- Label arg1_is_object, check_undefined_arg1;
- Label arg2_is_object, check_undefined_arg2;
- Label load_arg2, done;
-
- __ JumpIfNotSmi(rdx, &arg1_is_object);
- __ SmiToInteger32(r8, rdx);
- __ jmp(&load_arg2);
-
- // If the argument is undefined, it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg1);
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ Set(r8, 0);
- __ jmp(&load_arg2);
-
- __ bind(&arg1_is_object);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg1);
- // Get the untagged integer version of the rdx heap number in r8.
- IntegerConvert(masm, r8, rdx);
-
- // Here r8 has the untagged integer, rax has a Smi or a heap number.
- __ bind(&load_arg2);
- // Test if arg2 is a Smi.
- __ JumpIfNotSmi(rax, &arg2_is_object);
- __ SmiToInteger32(rcx, rax);
- __ jmp(&done);
-
- // If the argument is undefined, it converts to zero (ECMA-262, section 9.5).
- __ bind(&check_undefined_arg2);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, conversion_failure);
- __ Set(rcx, 0);
- __ jmp(&done);
-
- __ bind(&arg2_is_object);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the rax heap number in rcx.
- IntegerConvert(masm, rcx, rax);
- __ bind(&done);
- __ movl(rax, r8);
-}
-
-
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
-void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
- // Load operand in rdx into xmm0.
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1.
- __ JumpIfSmi(rax, &load_smi_rax);
- __ bind(&load_nonsmi_rax);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
- Label* not_numbers) {
- Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
- // Load operand in rdx into xmm0, or branch to not_numbers.
- __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
- __ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers); // Argument in rdx is not a number.
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Load operand in rax into xmm1, or branch to not_numbers.
- __ JumpIfSmi(rax, &load_smi_rax);
-
- __ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
- __ j(not_equal, not_numbers);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_rdx);
- __ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
- __ JumpIfNotSmi(rax, &load_nonsmi_rax);
-
- __ bind(&load_smi_rax);
- __ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* on_success,
- Label* on_not_smis,
- ConvertUndefined convert_undefined) {
- Register heap_number_map = scratch3;
- Register smi_result = scratch1;
- Label done, maybe_undefined_first, maybe_undefined_second, first_done;
-
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label first_smi;
- __ JumpIfSmi(first, &first_smi, Label::kNear);
- __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal,
- (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
- ? &maybe_undefined_first
- : on_not_smis);
- // Convert HeapNumber to smi if possible.
- __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
- __ movq(scratch2, xmm0);
- __ cvttsd2siq(smi_result, xmm0);
- // Check if conversion was successful by converting back and
- // comparing to the original double's bits.
- __ cvtlsi2sd(xmm1, smi_result);
- __ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
- __ j(not_equal, on_not_smis);
- __ Integer32ToSmi(first, smi_result);
-
- __ bind(&first_done);
- __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
- __ bind(&first_smi);
- __ AssertNotSmi(second);
- __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal,
- (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
- ? &maybe_undefined_second
- : on_not_smis);
- // Convert second to smi, if possible.
- __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
- __ movq(scratch2, xmm0);
- __ cvttsd2siq(smi_result, xmm0);
- __ cvtlsi2sd(xmm1, smi_result);
- __ movq(kScratchRegister, xmm1);
- __ cmpq(scratch2, kScratchRegister);
- __ j(not_equal, on_not_smis);
- __ Integer32ToSmi(second, smi_result);
- if (on_success != NULL) {
- __ jmp(on_success);
- } else {
- __ jmp(&done);
- }
-
- __ bind(&maybe_undefined_first);
- __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, on_not_smis);
- __ xor_(first, first);
- __ jmp(&first_done);
-
- __ bind(&maybe_undefined_second);
- __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, on_not_smis);
- __ xor_(second, second);
- if (on_success != NULL) {
- __ jmp(on_success);
- }
- // Else: fall through.
-
- __ bind(&done);
-}
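-
-
-// Illustrative sketch only; ConvertsToInt32Losslessly is a hypothetical helper
-// (relies on <string.h> for memcpy), not part of this file. It restates the
-// lossless-conversion test used above and in BinaryOpStub_CheckSmiInput:
-// truncate, convert back, and compare raw bit patterns, so fractions, -0.0
-// and out-of-range values are all rejected.
-static bool ConvertsToInt32Losslessly(double value, int32_t* out) {
- // Guard the cast; the stub instead relies on the truncation instruction's
- // indefinite-integer result failing the round-trip comparison.
- if (!(value > -2147483649.0 && value < 2147483648.0)) return false;
- int32_t as_int = static_cast<int32_t>(value); // Like cvttsd2si.
- double round_trip = static_cast<double>(as_int); // Like cvtlsi2sd.
- uint64_t original_bits, round_trip_bits;
- memcpy(&original_bits, &value, sizeof(original_bits));
- memcpy(&round_trip_bits, &round_trip, sizeof(round_trip_bits));
- if (original_bits != round_trip_bits) return false; // Inexact, or -0.0.
- *out = as_int;
- return true;
-}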
-
-
-void MathPowStub::Generate(MacroAssembler* masm) {
- // Choose register conforming to calling convention (when bailing out).
-#ifdef _WIN64
- const Register exponent = rdx;
-#else
- const Register exponent = rdi;
-#endif
- const Register base = rax;
- const Register scratch = rcx;
- const XMMRegister double_result = xmm3;
- const XMMRegister double_base = xmm2;
- const XMMRegister double_exponent = xmm1;
- const XMMRegister double_scratch = xmm4;
-
- Label call_runtime, done, exponent_not_smi, int_exponent;
-
- // Save 1 in double_result - we need this several times later on.
- __ movq(scratch, Immediate(1));
- __ cvtlsi2sd(double_result, scratch);
-
- if (exponent_type_ == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack.
- __ movq(base, Operand(rsp, 2 * kPointerSize));
- __ movq(exponent, Operand(rsp, 1 * kPointerSize));
- __ JumpIfSmi(base, &base_is_smi, Label::kNear);
- __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
-
- __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent, Label::kNear);
-
- __ bind(&base_is_smi);
- __ SmiToInteger32(base, base);
- __ cvtlsi2sd(double_base, base);
- __ bind(&unpack_exponent);
-
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiToInteger32(exponent, exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type_ == TAGGED) {
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiToInteger32(exponent, exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type_ != INTEGER) {
- Label fast_power;
- // Detect integer exponents stored as double.
- __ cvttsd2si(exponent, double_exponent);
- // Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cmpl(exponent, Immediate(0x80000000u));
- __ j(equal, &call_runtime);
- __ cvtlsi2sd(double_scratch, exponent);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_exponent, double_scratch);
- __ j(equal, &int_exponent);
-
- if (exponent_type_ == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label continue_sqrt, continue_rsqrt, not_plus_half;
- // Test for 0.5.
- // Load double_scratch with 0.5.
- __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64);
- __ movq(double_scratch, scratch);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &not_plus_half, Label::kNear);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- // According to IEEE-754, double-precision -Infinity has the highest
- // 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
- __ movq(double_scratch, scratch);
- __ ucomisd(double_scratch, double_base);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_sqrt, Label::kNear);
- __ j(carry, &continue_sqrt, Label::kNear);
-
- // Set result to Infinity in the special case.
- __ xorps(double_result, double_result);
- __ subsd(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(&continue_sqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_scratch, double_scratch);
- __ addsd(double_scratch, double_base); // Convert -0 to 0.
- __ sqrtsd(double_result, double_scratch);
- __ jmp(&done);
-
- // Test for -0.5.
- __ bind(&not_plus_half);
- // Load double_scratch with -0.5 by subtracting 1.
- __ subsd(double_scratch, double_result);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &fast_power, Label::kNear);
-
- // Calculates reciprocal of square root of base. Check for the special
- // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- // According to IEEE-754, double-precision -Infinity has the highest
- // 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
- __ movq(double_scratch, scratch);
- __ ucomisd(double_scratch, double_base);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_rsqrt, Label::kNear);
- __ j(carry, &continue_rsqrt, Label::kNear);
-
- // Set result to 0 in the special case.
- __ xorps(double_result, double_result);
- __ jmp(&done);
-
- __ bind(&continue_rsqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_exponent, double_exponent);
- __ addsd(double_exponent, double_base); // Convert -0 to +0.
- __ sqrtsd(double_exponent, double_exponent);
- __ divsd(double_result, double_exponent);
- __ jmp(&done);
- }
-
- // Using FPU instructions to calculate power.
- Label fast_power_failed;
- __ bind(&fast_power);
- __ fnclex(); // Clear flags to catch exceptions later.
- // Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ subq(rsp, Immediate(kDoubleSize));
- __ movsd(Operand(rsp, 0), double_exponent);
- __ fld_d(Operand(rsp, 0)); // E
- __ movsd(Operand(rsp, 0), double_base);
- __ fld_d(Operand(rsp, 0)); // B, E
-
- // Exponent is in st(1) and base is in st(0)
- // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
- // FYL2X calculates st(1) * log2(st(0))
- __ fyl2x(); // X
- __ fld(0); // X, X
- __ frndint(); // rnd(X), X
- __ fsub(1); // rnd(X), X-rnd(X)
- __ fxch(1); // X - rnd(X), rnd(X)
- // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
- __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
- __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 2^(X-rnd(X)), rnd(X)
- // FSCALE calculates st(0) * 2^st(1)
- __ fscale(); // 2^X, rnd(X)
- __ fstp(1);
- // Bail out to runtime in case of exceptions in the status word.
- __ fnstsw_ax();
- __ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
- __ j(not_zero, &fast_power_failed, Label::kNear);
- __ fstp_d(Operand(rsp, 0));
- __ movsd(double_result, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&done);
-
- __ bind(&fast_power_failed);
- __ fninit();
- __ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&call_runtime);
- }
-
- // Calculate power with integer exponent.
- __ bind(&int_exponent);
- const XMMRegister double_scratch2 = double_exponent;
- // Back up exponent as we need to check if exponent is negative later.
- __ movq(scratch, exponent); // Back up exponent.
- __ movsd(double_scratch, double_base); // Back up base.
- __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
-
- // Get absolute value of exponent.
- Label no_neg, while_true, while_false;
- __ testl(scratch, scratch);
- __ j(positive, &no_neg, Label::kNear);
- __ negl(scratch);
- __ bind(&no_neg);
-
- __ j(zero, &while_false, Label::kNear);
- __ shrl(scratch, Immediate(1));
- // Above condition means CF==0 && ZF==0. This means that the
- // bit that has been shifted out is 0 and the result is not 0.
- __ j(above, &while_true, Label::kNear);
- __ movsd(double_result, double_scratch);
- __ j(zero, &while_false, Label::kNear);
-
- __ bind(&while_true);
- __ shrl(scratch, Immediate(1));
- __ mulsd(double_scratch, double_scratch);
- __ j(above, &while_true, Label::kNear);
- __ mulsd(double_result, double_scratch);
- __ j(not_zero, &while_true);
-
- __ bind(&while_false);
- // If the exponent is negative, return 1/result.
- __ testl(exponent, exponent);
- __ j(greater, &done);
- __ divsd(double_scratch2, double_result);
- __ movsd(double_result, double_scratch2);
- // Test whether result is zero. Bail out to check for subnormal result.
- // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ xorps(double_scratch2, double_scratch2);
- __ ucomisd(double_scratch2, double_result);
- // double_exponent aliased as double_scratch2 has already been overwritten
- // and may not have contained the exponent value in the first place when the
- // input was a smi. We reset it with exponent value before bailing out.
- __ j(not_equal, &done);
- __ cvtlsi2sd(double_exponent, exponent);
-
- // Returning or bailing out.
- Counters* counters = masm->isolate()->counters();
- if (exponent_type_ == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in rax.
- __ bind(&done);
- __ AllocateHeapNumber(rax, rcx, &call_runtime);
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
- __ IncrementCounter(counters->math_pow(), 1);
- __ ret(2 * kPointerSize);
- } else {
- __ bind(&call_runtime);
- // Move base to the correct argument register. Exponent is already in xmm1.
- __ movsd(xmm0, double_base);
- ASSERT(double_exponent.is(xmm1));
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(2);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()), 2);
- }
- // Return value is in xmm0.
- __ movsd(double_result, xmm0);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- __ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1);
- __ ret(0);
- }
-}
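-
-
-// Illustrative sketch only; PowIntegerExponent is a hypothetical helper, not
-// part of this file. It restates the integer-exponent path above as plain
-// square-and-multiply on the absolute exponent, with a reciprocal at the end
-// for negative powers. The stub additionally bails out to the runtime when
-// that reciprocal underflows to zero, so subnormal results are computed
-// precisely.
-static double PowIntegerExponent(double base, int exponent) {
- double result = 1.0;
- double running = base;
- // Modular negation, like the negl above, so INT_MIN is handled as well.
- unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
- : static_cast<unsigned>(exponent);
- while (e != 0) {
- if (e & 1) result *= running; // Low bit set: fold the current square in.
- running *= running; // Square the running base.
- e >>= 1;
- }
- return exponent < 0 ? 1.0 / result : result;
-}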
-
-
-void ArrayLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->length_string());
- receiver = rdx;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- receiver = rax;
- }
-
- StubCompiler::GenerateLoadArrayLength(masm, receiver, r8, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->prototype_string());
- receiver = rdx;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- receiver = rax;
- }
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8, r9, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->length_string());
- receiver = rdx;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- receiver = rax;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
- support_wrapper_);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, kind());
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = rdx;
- Register value = rax;
- Register scratch = rbx;
- if (kind() == Code::KEYED_STORE_IC) {
- __ Cmp(rcx, masm->isolate()->factory()->length_string());
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::GenerateStoreMiss(masm, kind());
-}
-
-
-void LoadFieldStub::Generate(MacroAssembler* masm) {
- StubCompiler::DoGenerateFastPropertyLoad(masm, rax, reg_, inobject_, index_);
- __ ret(0);
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in rdx and the parameter count is in rax.
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(rdx, &slow);
-
- // Check if the calling frame is an arguments adaptor frame. We look at the
- // context offset, and if the frame is not a regular one, then we find a
- // Smi instead of the context. We can't use SmiCompare here, because that
- // only works for comparing two smis.
- Label adaptor;
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register rax. Use unsigned comparison to get negative
- // check for free.
- __ cmpq(rdx, rax);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
- __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
- index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
- __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
- __ Ret();
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(rbx); // Return address.
- __ push(rdx);
- __ push(rbx);
- __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
-}
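
The read-element stub above compares the key against the argument count with a single unsigned comparison, so a negative key fails the same test as an out-of-range one, as the "negative check for free" comments note. A minimal standalone C++ sketch of the same trick (names and types are illustrative, not V8's):

    #include <cstdint>
    #include <cstdio>

    // One unsigned compare covers both "too large" and "negative": a negative
    // index reinterpreted as unsigned becomes huge and fails the same test.
    bool InBounds(int64_t index, int64_t count) {
      return static_cast<uint64_t>(index) < static_cast<uint64_t>(count);
    }

    int main() {
      std::printf("%d %d %d\n", InBounds(2, 5), InBounds(7, 5), InBounds(-1, 5));
      // Prints "1 0 0": the negative key is rejected without a separate sign check.
      return 0;
    }
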
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
- // Stack layout:
- // rsp[0] : return address
- // rsp[8] : number of parameters (tagged)
- // rsp[16] : receiver displacement
- // rsp[24] : function
- // Registers used over the whole function:
- // rbx: the mapped parameter count (untagged)
- // rax: the allocated object (tagged).
-
- Factory* factory = masm->isolate()->factory();
-
- __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
- // rbx = parameter count (untagged)
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
-
- // No adaptor, parameter count = argument count.
- __ movq(rcx, rbx);
- __ jmp(&try_allocate, Label::kNear);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ SmiToInteger64(rcx,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
-
- // rbx = parameter count (untagged)
- // rcx = argument count (untagged)
- // Compute the mapped parameter count = min(rbx, rcx) in rbx.
- __ cmpq(rbx, rcx);
- __ j(less_equal, &try_allocate, Label::kNear);
- __ movq(rbx, rcx);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- Label no_parameter_map;
- __ xor_(r8, r8);
- __ testq(rbx, rbx);
- __ j(zero, &no_parameter_map, Label::kNear);
- __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
- __ bind(&no_parameter_map);
-
- // 2. Backing store.
- __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ AllocateInNewSpace(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
-
- // rax = address of new object(s) (tagged)
- // rcx = argument count (untagged)
- // Get the arguments boilerplate from the current native context into rdi.
- Label has_mapped_parameters, copy;
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
- __ testq(rbx, rbx);
- __ j(not_zero, &has_mapped_parameters, Label::kNear);
-
- const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
- __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
- __ jmp(&copy, Label::kNear);
-
- const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
- __ bind(&has_mapped_parameters);
- __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
- __ bind(&copy);
-
- // rax = address of new object (tagged)
- // rbx = mapped parameter count (untagged)
- // rcx = argument count (untagged)
- // rdi = address of boilerplate object (tagged)
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(rdx, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), rdx);
- }
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- rdx);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- // Note: rcx is tagged from here on.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ Integer32ToSmi(rcx, rcx);
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- rcx);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, rdi will point there, otherwise to the
- // backing store.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
-
- // rax = address of new object (tagged)
- // rbx = mapped parameter count (untagged)
- // rcx = argument count (tagged)
- // rdi = address of parameter map or backing store (tagged)
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ testq(rbx, rbx);
- __ j(zero, &skip_parameter_map);
-
- __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
- // rbx contains the untagged argument count. Add 2 and tag to write.
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ Integer64PlusConstantToSmi(r9, rbx, 2);
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
- __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameters thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
-
- // Load tagged parameter count into r9.
- __ Integer32ToSmi(r9, rbx);
- __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, Operand(rsp, 1 * kPointerSize));
- __ subq(r8, r9);
- __ Move(r11, factory->the_hole_value());
- __ movq(rdx, rdi);
- __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- // r9 = loop variable (tagged)
- // r8 = mapping index (tagged)
- // r11 = the hole value
- // rdx = address of parameter map (tagged)
- // rdi = address of backing store (tagged)
- __ jmp(&parameters_test, Label::kNear);
-
- __ bind(&parameters_loop);
- __ SmiSubConstant(r9, r9, Smi::FromInt(1));
- __ SmiToInteger64(kScratchRegister, r9);
- __ movq(FieldOperand(rdx, kScratchRegister,
- times_pointer_size,
- kParameterMapHeaderSize),
- r8);
- __ movq(FieldOperand(rdi, kScratchRegister,
- times_pointer_size,
- FixedArray::kHeaderSize),
- r11);
- __ SmiAddConstant(r8, r8, Smi::FromInt(1));
- __ bind(&parameters_test);
- __ SmiTest(r9);
- __ j(not_zero, &parameters_loop, Label::kNear);
-
- __ bind(&skip_parameter_map);
-
- // rcx = argument count (tagged)
- // rdi = address of backing store (tagged)
- // Copy arguments header and remaining slots (if there are any).
- __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
- factory->fixed_array_map());
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
-
- Label arguments_loop, arguments_test;
- __ movq(r8, rbx);
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- // Untag rcx for the loop below.
- __ SmiToInteger64(rcx, rcx);
- __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
- __ subq(rdx, kScratchRegister);
- __ jmp(&arguments_test, Label::kNear);
-
- __ bind(&arguments_loop);
- __ subq(rdx, Immediate(kPointerSize));
- __ movq(r9, Operand(rdx, 0));
- __ movq(FieldOperand(rdi, r8,
- times_pointer_size,
- FixedArray::kHeaderSize),
- r9);
- __ addq(r8, Immediate(1));
-
- __ bind(&arguments_test);
- __ cmpq(r8, rcx);
- __ j(less, &arguments_loop, Label::kNear);
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- // rcx = argument count (untagged)
- __ bind(&runtime);
- __ Integer32ToSmi(rcx, rcx);
- __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
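
The size computed in steps 1 to 3 of the stub above covers the optional parameter map (with its two extra slots for the context and the backing store), the backing store itself, and the arguments object, all allocated in one go. A small sketch of that arithmetic, with illustrative constants rather than V8's actual heap layout values:

    #include <cstddef>
    #include <cstdio>

    // Illustrative constants; the real values come from V8's Heap/FixedArray layout.
    const size_t kPtr = 8;
    const size_t kFixedArrayHeader = 2 * kPtr;      // map + length
    const size_t kArgumentsObjectSize = 5 * kPtr;   // hypothetical object size

    // Mirrors steps 1-3 above: parameter map (two extra slots for the context
    // and the backing store), then the backing store, then the arguments object.
    size_t MappedArgumentsAllocationSize(size_t mapped_count, size_t arg_count) {
      size_t parameter_map = 0;
      if (mapped_count > 0) {
        parameter_map = kFixedArrayHeader + (2 + mapped_count) * kPtr;
      }
      size_t backing_store = kFixedArrayHeader + arg_count * kPtr;
      return parameter_map + backing_store + kArgumentsObjectSize;
    }

    int main() {
      std::printf("%zu\n", MappedArgumentsAllocationSize(2, 3));  // 128 with these constants.
      return 0;
    }
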
-
-
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
- // rsp[16] : receiver displacement
- // rsp[24] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &runtime);
-
- // Patch the arguments.length and the parameters pointer.
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
- __ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
-}
-
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
- // rsp[16] : receiver displacement
- // rsp[24] : function
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
- __ SmiToInteger64(rcx, rcx);
- __ jmp(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
- __ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- __ movq(Operand(rsp, 2 * kPointerSize), rdx);
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ testq(rcx, rcx);
- __ j(zero, &add_arguments_object, Label::kNear);
- __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
-
- // Do the allocation of both objects in one go.
- __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current native context.
- __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
- const int offset =
- Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
- __ movq(rdi, Operand(rdi, offset));
-
- // Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- rcx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ testq(rcx, rcx);
- __ j(zero, &done);
-
- // Get the parameters pointer from the stack.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
-
- __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
- // Untag the length for the loop below.
- __ SmiToInteger64(rcx, rcx);
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ movq(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
- __ j(not_zero, &loop);
-
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
-}
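
The try_allocate path in the strict-arguments stub above skips the elements FixedArray entirely when there are no actual arguments and only allocates the arguments object. A brief sketch of that size computation, again with illustrative constants rather than V8's real ones:

    #include <cstddef>
    #include <cstdio>

    const size_t kPtr = 8;
    const size_t kFixedArrayHeader = 2 * kPtr;           // map + length
    const size_t kArgumentsObjectSizeStrict = 4 * kPtr;  // hypothetical

    // Zero arguments: only the arguments object. Otherwise: object plus a
    // FixedArray header plus one slot per argument, allocated in one go.
    size_t StrictArgumentsAllocationSize(size_t arg_count) {
      size_t elements =
          (arg_count == 0) ? 0 : kFixedArrayHeader + arg_count * kPtr;
      return elements + kArgumentsObjectSizeStrict;
    }

    int main() {
      std::printf("%zu %zu\n",
                  StrictArgumentsAllocationSize(0),   // 32: object only.
                  StrictArgumentsAllocationSize(3));  // 72: object + header + 3 slots.
      return 0;
    }
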
-
-
-void RegExpExecStub::Generate(MacroAssembler* masm) {
- // Just jump directly to runtime if native RegExp is not selected at compile
- // time, or if the regexp entry in generated code is turned off by a runtime
- // switch or at compilation.
-#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else // V8_INTERPRETED_REGEXP
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: last_match_info (expected JSArray)
- // rsp[16]: previous index
- // rsp[24]: subject string
- // rsp[32]: JSRegExp object
-
- static const int kLastMatchInfoOffset = 1 * kPointerSize;
- static const int kPreviousIndexOffset = 2 * kPointerSize;
- static const int kSubjectOffset = 3 * kPointerSize;
- static const int kJSRegExpOffset = 4 * kPointerSize;
-
- Label runtime;
- // Ensure that a RegExp stack is allocated.
- Isolate* isolate = masm->isolate();
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate);
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate);
- __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
- __ testq(kScratchRegister, kScratchRegister);
- __ j(zero, &runtime);
-
- // Check that the first argument is a JSRegExp object.
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
-
- // Check that the RegExp has been compiled (data contains a fixed array).
- __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
- if (FLAG_debug_code) {
- Condition is_smi = masm->CheckSmi(rax);
- __ Check(NegateCondition(is_smi),
- "Unexpected type for RegExp data, FixedArray expected");
- __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
- __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
- }
-
- // rax: RegExp data (FixedArray)
- // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
- __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
- __ j(not_equal, &runtime);
-
- // rax: RegExp data (FixedArray)
- // Check that the number of captures fit in the static offsets vector buffer.
- __ SmiToInteger32(rdx,
- FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
- // Check (number_of_captures + 1) * 2 <= offsets vector size
- // Or number_of_captures <= offsets vector size / 2 - 1
- STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
- __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1));
- __ j(above, &runtime);
-
- // Reset offset for possibly sliced string.
- __ Set(r14, 0);
- __ movq(rdi, Operand(rsp, kSubjectOffset));
- __ JumpIfSmi(rdi, &runtime);
- __ movq(r15, rdi); // Make a copy of the original subject string.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- // rax: RegExp data (FixedArray)
- // rdi: subject string
- // r15: subject string
- // Handle subject string according to its encoding and representation:
- // (1) Sequential two byte? If yes, go to (9).
- // (2) Sequential one byte? If yes, go to (6).
- // (3) Anything but sequential or cons? If yes, go to (7).
- // (4) Cons string. If the string is flat, replace subject with first string.
- // Otherwise bailout.
- // (5a) Is subject sequential two byte? If yes, go to (9).
- // (5b) Is subject external? If yes, go to (8).
- // (6) One byte sequential. Load regexp code for one byte.
- // (E) Carry on.
- /// [...]
-
- // Deferred code at the end of the stub:
- // (7) Not a long external string? If yes, go to (10).
- // (8) External string. Make it, offset-wise, look like a sequential string.
- // (8a) Is the external string one byte? If yes, go to (6).
- // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
- // (10) Short external string or not a string? If yes, bail out to runtime.
- // (11) Sliced string. Replace subject with parent. Go to (5a).
-
- Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
- external_string /* 8 */, check_underlying /* 5a */,
- not_seq_nor_cons /* 7 */, check_code /* E */,
- not_long_external /* 10 */;
-
- // (1) Sequential two byte? If yes, go to (9).
- __ andb(rbx, Immediate(kIsNotStringMask |
- kStringRepresentationMask |
- kStringEncodingMask |
- kShortExternalStringMask));
- STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string); // Go to (9).
-
- // (2) Sequential one byte? If yes, go to (6).
- // Any other sequential string must be one byte.
- __ andb(rbx, Immediate(kIsNotStringMask |
- kStringRepresentationMask |
- kShortExternalStringMask));
- __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6).
-
- // (3) Anything but sequential or cons? If yes, go to (7).
- // We check whether the subject string is a cons, since sequential strings
- // have already been covered.
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
- STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ cmpq(rbx, Immediate(kExternalStringTag));
- __ j(greater_equal, &not_seq_nor_cons); // Go to (7).
-
- // (4) Cons string. Check that it's flat.
- // Replace subject with first string and reload instance type.
- __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
- Heap::kempty_stringRootIndex);
- __ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
- __ bind(&check_underlying);
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
-
- // (5a) Is subject sequential two byte? If yes, go to (9).
- __ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
- STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string); // Go to (9).
- // (5b) Is subject external? If yes, go to (8).
- __ testb(rbx, Immediate(kStringRepresentationMask));
- // The underlying external string is never a short external string.
- STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
- STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
- __ j(not_zero, &external_string); // Go to (8)
-
- // (6) One byte sequential. Load regexp code for one byte.
- __ bind(&seq_one_byte_string);
- // rax: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rcx, 1); // Type is one byte.
-
- // (E) Carry on. String handling is done.
- __ bind(&check_code);
- // r11: irregexp code
- // Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object; otherwise it
- // contains a smi (code flushing support).
- __ JumpIfSmi(r11, &runtime);
-
- // rdi: sequential subject string (or look-alike, external string)
- // r15: original subject string
- // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r11: code
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- // We have to use r15 instead of rdi to load the length because rdi might
- // have been only made to look like a sequential string when it actually
- // is an external string.
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
- __ JumpIfNotSmi(rbx, &runtime);
- __ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
- __ j(above_equal, &runtime);
- __ SmiToInteger64(rbx, rbx);
-
- // rdi: subject string
- // rbx: previous index
- // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
- // r11: code
- // All checks done. Now push arguments for native regexp code.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->regexp_entry_native(), 1);
-
- // Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 9;
- int argument_slots_on_stack =
- masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
- __ EnterApiExitFrame(argument_slots_on_stack);
-
- // Argument 9: Pass current isolate address.
- // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
- // Immediate(ExternalReference::isolate_address()));
- __ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
- __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
- kScratchRegister);
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
- Immediate(1));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
- __ movq(r9, Operand(kScratchRegister, 0));
- __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addq(r9, Operand(kScratchRegister, 0));
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- // Argument 6 is passed in r9 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
- Immediate(0));
-#else
- __ Set(r9, 0);
-#endif
-
- // Argument 5: static offsets vector buffer.
- __ LoadAddress(r8,
- ExternalReference::address_of_static_offsets_vector(isolate));
- // Argument 5 passed in r8 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
-#endif
-
- // First four arguments are passed in registers on both Linux and Windows.
-#ifdef _WIN64
- Register arg4 = r9;
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg4 = rcx;
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
-
- // Keep track on aliasing between argX defined above and the registers used.
- // rdi: subject string
- // rbx: previous index
- // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
- // r11: code
- // r14: slice offset
- // r15: original subject string
-
- // Argument 2: Previous index.
- __ movq(arg2, rbx);
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
- // Prepare start and end index of the input.
- // Load the length from the original sliced string if that is the case.
- __ addq(rbx, r14);
- __ SmiToInteger32(arg3, FieldOperand(r15, String::kLengthOffset));
- __ addq(r14, arg3); // Using arg3 as scratch.
-
- // rbx: start index of the input
- // r14: end index of the input
- // r15: original subject string
- __ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
- __ j(zero, &setup_two_byte, Label::kNear);
- __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
- __ jmp(&setup_rest, Label::kNear);
- __ bind(&setup_two_byte);
- __ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
- __ bind(&setup_rest);
-
- // Argument 1: Original subject string.
- // The original subject is in the previous stack frame. Therefore we have to
- // use rbp, which points exactly to one pointer size below the previous rsp.
- // (Because creating a new stack frame pushes the previous rbp onto the stack
- // and thereby moves up rsp by one kPointerSize.)
- __ movq(arg1, r15);
-
- // Locate the code entry and call it.
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(r11);
-
- __ LeaveApiExitFrame();
-
- // Check the result.
- Label success;
- Label exception;
- __ cmpl(rax, Immediate(1));
- // We expect exactly one result since we force the called regexp to behave
- // as non-global.
- __ j(equal, &success, Label::kNear);
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
- __ j(equal, &exception);
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
- // If none of the above, it can only be retry.
- // Handle that in the runtime system.
- __ j(not_equal, &runtime);
-
- // For failure return null.
- __ LoadRoot(rax, Heap::kNullValueRootIndex);
- __ ret(4 * kPointerSize);
-
- // Load RegExp data.
- __ bind(&success);
- __ movq(rax, Operand(rsp, kJSRegExpOffset));
- __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- __ SmiToInteger32(rax,
- FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rax, rax, times_1, 2));
-
- // rdx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ movq(r15, Operand(rsp, kLastMatchInfoOffset));
- __ JumpIfSmi(r15, &runtime);
- __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(r15, JSArray::kElementsOffset));
- __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information. Ensure no overflow in add.
- STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ subl(rax, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmpl(rdx, rax);
- __ j(greater, &runtime);
-
- // rbx: last_match_info backing store (FixedArray)
- // rdx: number of capture registers
- // Store the capture count.
- __ Integer32ToSmi(kScratchRegister, rdx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
- kScratchRegister);
- // Store last subject and last input.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
- __ movq(rcx, rax);
- __ RecordWriteField(rbx,
- RegExpImpl::kLastSubjectOffset,
- rax,
- rdi,
- kDontSaveFPRegs);
- __ movq(rax, rcx);
- __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
- __ RecordWriteField(rbx,
- RegExpImpl::kLastInputOffset,
- rax,
- rdi,
- kDontSaveFPRegs);
-
- // Get the static offsets vector filled by the native regexp code.
- __ LoadAddress(rcx,
- ExternalReference::address_of_static_offsets_vector(isolate));
-
- // rbx: last_match_info backing store (FixedArray)
- // rcx: offsets vector
- // rdx: number of capture registers
- Label next_capture, done;
- // Capture register counter starts from number of capture registers and
- // counts down until wrapping after zero.
- __ bind(&next_capture);
- __ subq(rdx, Immediate(1));
- __ j(negative, &done, Label::kNear);
- // Read the value from the static offsets vector buffer and make it a smi.
- __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
- __ Integer32ToSmi(rdi, rdi);
- // Store the smi value in the last match info.
- __ movq(FieldOperand(rbx,
- rdx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- rdi);
- __ jmp(&next_capture);
- __ bind(&done);
-
- // Return last match info.
- __ movq(rax, r15);
- __ ret(4 * kPointerSize);
-
- __ bind(&exception);
- // The result must now be an exception. If there is no pending exception, a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // the exception has not been created yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate);
- Operand pending_exception_operand =
- masm->ExternalOperand(pending_exception_address, rbx);
- __ movq(rax, pending_exception_operand);
- __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ cmpq(rax, rdx);
- __ j(equal, &runtime);
- __ movq(pending_exception_operand, rdx);
-
- __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- Label termination_exception;
- __ j(equal, &termination_exception, Label::kNear);
- __ Throw(rax);
-
- __ bind(&termination_exception);
- __ ThrowUncatchable(rax);
-
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-
- // Deferred code for string handling.
- // (7) Not a long external string? If yes, go to (10).
- __ bind(&not_seq_nor_cons);
- // Compare flags are still set from (3).
- __ j(greater, &not_long_external, Label::kNear); // Go to (10).
-
- // (8) External string. Short external strings have been ruled out.
- __ bind(&external_string);
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ testb(rbx, Immediate(kIsIndirectStringMask));
- __ Assert(zero, "external string expected, but not found");
- }
- __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kTwoByteStringTag == 0);
- // (8a) Is the external string one byte? If yes, go to (6).
- __ testb(rbx, Immediate(kStringEncodingMask));
- __ j(not_zero, &seq_one_byte_string); // Goto (6).
-
- // rdi: subject string (flat two-byte)
- // rax: RegExp data (FixedArray)
- // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
- __ bind(&seq_two_byte_string);
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
- __ Set(rcx, 0); // Type is two byte.
- __ jmp(&check_code); // Go to (E).
-
- // (10) Not a string or a short external string? If yes, bail out to runtime.
- __ bind(&not_long_external);
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
- __ j(not_zero, &runtime);
-
- // (11) Sliced string. Replace subject with parent. Go to (5a).
- // Load offset into r14 and replace subject string with parent.
- __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
- __ jmp(&check_underlying);
-#endif // V8_INTERPRETED_REGEXP
-}
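
Earlier in this stub, the capture-count check is written in the rewritten form its comment gives: instead of testing (number_of_captures + 1) * 2 <= offsets vector size, the code tests number_of_captures <= size / 2 - 1. A short C++ sketch verifying that the two forms agree for non-negative counts and an even vector size (the STATIC_ASSERT guarantees the size is at least 2); the function names are purely illustrative:

    #include <cassert>
    #include <cstdio>

    bool CheckOriginal(int captures, int vector_size) {
      return (captures + 1) * 2 <= vector_size;
    }
    bool CheckRewritten(int captures, int vector_size) {
      return captures <= vector_size / 2 - 1;
    }

    int main() {
      for (int size = 2; size <= 64; size += 2) {
        for (int c = 0; c <= 40; ++c) {
          assert(CheckOriginal(c, size) == CheckRewritten(c, size));
        }
      }
      std::printf("checks agree\n");
      return 0;
    }
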
-
-
-void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
- const int kMaxInlineLength = 100;
- Label slowcase;
- Label done;
- __ movq(r8, Operand(rsp, kPointerSize * 3));
- __ JumpIfNotSmi(r8, &slowcase);
- __ SmiToInteger32(rbx, r8);
- __ cmpl(rbx, Immediate(kMaxInlineLength));
- __ j(above, &slowcase);
- // Smi-tagging is equivalent to multiplying by 2.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // Allocate RegExpResult followed by FixedArray with size in rbx.
- // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
- // Elements: [Map][Length][..elements..]
- __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- rbx, // In: Number of elements.
- rax, // Out: Start of allocation (tagged).
- rcx, // Out: End of allocation.
- rdx, // Scratch register
- &slowcase,
- TAG_OBJECT);
- // rax: Start of allocated area, object-tagged.
- // rbx: Number of array elements as int32.
- // r8: Number of array elements as smi.
-
- // Set JSArray map to global.regexp_result_map().
- __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
-
- // Set empty properties FixedArray.
- __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
-
- // Set elements to point to FixedArray allocated right after the JSArray.
- __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
- __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
-
- // Set input, index and length fields from arguments.
- __ movq(r8, Operand(rsp, kPointerSize * 1));
- __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 2));
- __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
- __ movq(r8, Operand(rsp, kPointerSize * 3));
- __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
-
- // Fill out the elements FixedArray.
- // rax: JSArray.
- // rcx: FixedArray.
- // rbx: Number of elements in array as int32.
-
- // Set map.
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
- // Set length.
- __ Integer32ToSmi(rdx, rbx);
- __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
- // Fill contents of fixed-array with undefined.
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
- // Fill fixed array elements with undefined.
- // rax: JSArray.
- // rbx: Number of elements in array that remains to be filled, as int32.
- // rcx: Start of elements in FixedArray.
- // rdx: undefined.
- Label loop;
- __ testl(rbx, rbx);
- __ bind(&loop);
- __ j(less_equal, &done); // Jump if rbx is negative or zero.
- __ subl(rbx, Immediate(1));
- __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
- __ jmp(&loop);
-
- __ bind(&done);
- __ ret(3 * kPointerSize);
-
- __ bind(&slowcase);
- __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
-}
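
The STATIC_ASSERTs in the stub above (kSmiTag == 0, kSmiTagSize == 1) describe a one-bit tag in the low bit of the word, which is why the comment says smi-tagging is equivalent to multiplying by 2. A minimal sketch of that tagging scheme follows; note that the x64 port of this era actually keeps the 32-bit smi payload in the upper half of the word, so this illustrates the idea behind the asserts, not the engine's exact encoding:

    #include <cstdint>
    #include <cstdio>

    const intptr_t kSmiTag = 0;   // Tag value for small integers.
    const int kSmiTagSize = 1;    // One tag bit at the low end of the word.

    intptr_t SmiTag(intptr_t value) { return (value << kSmiTagSize) | kSmiTag; }
    intptr_t SmiUntag(intptr_t smi) { return smi >> kSmiTagSize; }
    bool IsSmi(intptr_t word) {
      return (word & ((1 << kSmiTagSize) - 1)) == kSmiTag;
    }

    int main() {
      intptr_t tagged = SmiTag(21);
      std::printf("%ld %ld %d\n", (long)tagged, (long)SmiUntag(tagged), IsSmi(tagged));
      // Prints "42 21 1": tagging is a left shift by one, i.e. multiplying by 2.
      return 0;
    }
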
-
-
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ SmiToInteger32(
- mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1));
- __ subq(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- Factory* factory = masm->isolate()->factory();
- if (!object_is_smi) {
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- factory->heap_number_map(),
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- Register probe = mask;
- __ movq(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
- }
-
- __ bind(&is_smi);
- __ SmiToInteger32(scratch, object);
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- // Check if the entry is the smi we are looking for.
- __ cmpq(object,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ movq(result,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
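
As the lookup comment above says, the hash for a heap-number key is the XOR of the upper and lower 32-bit words of the double (cf. Heap::GetNumberStringCache), while a smi key hashes to its own value. A small sketch of the double hash, using memcpy for the bit reinterpretation; the function name is illustrative:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // XOR the upper and lower halves of the IEEE-754 bit pattern.
    uint32_t DoubleHash(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // Reinterpret without UB.
      return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
    }

    int main() {
      std::printf("%u %u\n", DoubleHash(1.5), DoubleHash(2.5));
      return 0;
    }
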
-
-
-void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask) {
- __ and_(hash, mask);
- // Each entry in string cache consists of two pointer sized fields,
- // but times_twice_pointer_size (multiplication by 16) scale factor
- // is not supported by addrmode on x64 platform.
- // So we have to premultiply entry index before lookup.
- __ shl(hash, Immediate(kPointerSizeLog2 + 1));
-}
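
Each cache entry is a (number, string) pair, so the entry index has to be scaled by two pointer sizes (16 bytes), and x64 addressing modes only scale by 1, 2, 4 or 8; that is why the helper above premultiplies the masked hash. A tiny sketch of the resulting arithmetic (constants and names are illustrative):

    #include <cstdint>
    #include <cstdio>

    const int kPointerSizeLog2 = 3;  // 8-byte pointers on x64.

    // Byte offset of the chosen entry: (hash & mask) * 16, computed up front
    // because the addressing mode cannot apply a scale factor of 16 itself.
    uint32_t EntryByteOffset(uint32_t hash, uint32_t mask) {
      return (hash & mask) << (kPointerSizeLog2 + 1);
    }

    int main() {
      // A cache with 64 entries occupies 128 FixedArray slots, so mask = 64 - 1.
      std::printf("%u\n", EntryByteOffset(0x12345u, 63));  // Entry 5 -> offset 80.
      return 0;
    }
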
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ movq(rbx, Operand(rsp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
-static int NegativeComparisonResult(Condition cc) {
- ASSERT(cc != equal);
- ASSERT((cc == less) || (cc == less_equal)
- || (cc == greater) || (cc == greater_equal));
- return (cc == greater || cc == greater_equal) ? LESS : GREATER;
-}
-
-
-static void CheckInputType(MacroAssembler* masm,
- Register input,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
- __ j(not_equal, fail);
- }
- // We could be strict about internalized/non-internalized here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-static void BranchIfNotInternalizedString(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the internalized bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
- STATIC_ASSERT(kInternalizedTag != 0);
- __ testb(scratch, Immediate(kIsInternalizedMask));
- __ j(zero, label);
-}
-
-
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
- Label check_unequal_objects, done;
- Condition cc = GetCondition();
- Factory* factory = masm->isolate()->factory();
-
- Label miss;
- CheckInputType(masm, rdx, left_, &miss);
- CheckInputType(masm, rax, right_, &miss);
-
- // Compare two smis.
- Label non_smi, smi_done;
- __ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
- __ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
- __ bind(&smi_done);
- __ movq(rax, rdx);
- __ ret(0);
- __ bind(&non_smi);
-
- // The compare stub returns a positive, negative, or zero 64-bit integer
- // value in rax, corresponding to the result of comparing the two inputs.
- // NOTICE! This code is only reached after a smi-fast-case check, so
- // it is certain that at least one operand isn't a smi.
-
- {
- Label not_user_equal, user_equal;
- __ JumpIfSmi(rax, &not_user_equal);
- __ JumpIfSmi(rdx, &not_user_equal);
-
- __ CmpObjectType(rax, JS_OBJECT_TYPE, rbx);
- __ j(not_equal, &not_user_equal);
-
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &not_user_equal);
-
- __ testb(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &user_equal);
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &user_equal);
-
- __ jmp(&not_user_equal);
-
- __ bind(&user_equal);
-
- __ pop(rbx); // Return address.
- __ push(rax);
- __ push(rdx);
- __ push(rbx);
- __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
- __ bind(&not_user_equal);
- }
-
- // Two identical objects are equal unless they are both NaN or undefined.
- {
- Label not_identical;
- __ cmpq(rax, rdx);
- __ j(not_equal, &not_identical, Label::kNear);
-
- if (cc != equal) {
- // Check for undefined. undefined OP undefined is false even though
- // undefined == undefined.
- Label check_for_nan;
- __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(rax, NegativeComparisonResult(cc));
- __ ret(0);
- __ bind(&check_for_nan);
- }
-
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- Label heap_number;
- // If it's not a heap number, then return equal for (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
- if (cc != equal) {
- // Call runtime on identical objects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical, Label::kNear);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
- if (cc == greater_equal || cc == greater) {
- __ neg(rax);
- }
- __ ret(0);
-
- __ bind(&not_identical);
- }
-
- if (cc == equal) { // Both strict and non-strict.
- Label slow; // Fallthrough label.
-
- // If we're doing a strict equality comparison, we don't have to do
- // type conversion, so we generate code to do fast comparison for objects
- // and oddballs. Non-smi numbers and strings still go through the usual
- // slow-case code.
- if (strict()) {
- // If either is a Smi (we know that not both are), then they can only
- // be equal if the other is a HeapNumber. If so, use the slow case.
- {
- Label not_smis;
- __ SelectNonSmi(rbx, rax, rdx, &not_smis);
-
- // Check if the non-smi operand is a heap number.
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- factory->heap_number_map());
- // If heap number, handle it in the slow case.
- __ j(equal, &slow);
- // Return non-equal. ebx (the lower half of rbx) is not zero.
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&not_smis);
- }
-
- // If either operand is a JSObject or an oddball value, then they are not
- // equal since their pointers are different.
- // There is no test for undetectability in strict equality.
-
- // If the first object is a JS object, we have done pointer comparison.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
- Label first_non_object;
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(below, &first_non_object, Label::kNear);
- // Return non-zero (eax (not rax) is not zero)
- Label return_not_equal;
- STATIC_ASSERT(kHeapObjectTag != 0);
- __ bind(&return_not_equal);
- __ ret(0);
-
- __ bind(&first_non_object);
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &return_not_equal);
-
- // Check for oddballs: true, false, null, undefined.
- __ CmpInstanceType(rcx, ODDBALL_TYPE);
- __ j(equal, &return_not_equal);
-
- // Fall through to the general case.
- }
- __ bind(&slow);
- }
-
- // Generate the number comparison code.
- Label non_number_comparison;
- Label unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(0);
-
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc != not_equal);
- if (cc == less || cc == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
-
- // Fast negative check for internalized-to-internalized equality.
- Label check_for_strings;
- if (cc == equal) {
- BranchIfNotInternalizedString(
- masm, &check_for_strings, rax, kScratchRegister);
- BranchIfNotInternalizedString(
- masm, &check_for_strings, rdx, kScratchRegister);
-
- // We've already checked for object identity, so if both operands are
- // internalized strings they aren't equal. Register eax (not rax) already
- // holds a non-zero value, which indicates not equal, so just return.
- __ ret(0);
- }
-
- __ bind(&check_for_strings);
-
- __ JumpIfNotBothSequentialAsciiStrings(
- rdx, rax, rcx, rbx, &check_unequal_objects);
-
- // Inline comparison of ASCII strings.
- if (cc == equal) {
- StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- rdx,
- rax,
- rcx,
- rbx);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rdi,
- r8);
- }
-
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from string comparison");
-#endif
-
- __ bind(&check_unequal_objects);
- if (cc == equal && !strict()) {
- // Not strict equality. Objects are unequal if
- // they are both JSObjects and not undetectable,
- // and their pointers are different.
- Label not_both_objects, return_unequal;
- // At most one is a smi, so we can test for smi by adding the two.
- // A smi plus a heap object has the low bit set, a heap object plus
- // a heap object has the low bit clear.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(rcx, Operand(rax, rdx, times_1, 0));
- __ testb(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, &not_both_objects, Label::kNear);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
- __ j(below, &not_both_objects, Label::kNear);
- __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(below, &not_both_objects, Label::kNear);
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal, Label::kNear);
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal, Label::kNear);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(rax, EQUAL);
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in rax,
- // or return equal if we fell through to here.
- __ ret(0);
- __ bind(&not_both_objects);
- }
-
- // Push arguments below the return address to prepare jump to builtin.
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc == equal) {
- builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
- }
-
- // Restore return address on the stack.
- __ push(rcx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
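
The smi fast path at the top of ICCompareStub::GenerateGeneric returns left minus right and, when the subtraction overflows, applies bitwise NOT to restore the correct sign, relying on the fact that the corrected value cannot be zero for smi-encoded operands. A minimal C++ sketch of that trick; __builtin_sub_overflow is a GCC/Clang builtin and the function name is illustrative:

    #include <cstdint>
    #include <cstdio>

    // < 0 means left < right, 0 means equal, > 0 means left > right.
    int64_t CompareSmiValues(int64_t left, int64_t right) {
      int64_t diff;
      if (__builtin_sub_overflow(left, right, &diff)) {
        diff = ~diff;  // Overflow flipped the sign; NOT flips it back.
      }
      return diff;
    }

    int main() {
      std::printf("%d %d %d\n",
                  CompareSmiValues(1, 2) < 0,            // 1
                  CompareSmiValues(5, 5) == 0,           // 1
                  CompareSmiValues(INT64_MAX, -1) > 0);  // 1, despite overflow.
      return 0;
    }
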
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
-static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // rbx : cache cell for call target
- // rdi : the function to call
- ASSERT(!FLAG_optimize_constructed_arrays);
- Isolate* isolate = masm->isolate();
- Label initialize, done;
-
- // Load the cache state into rcx.
- __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmpq(rcx, rdi);
- __ j(equal, &done, Label::kNear);
- __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ j(equal, &done, Label::kNear);
-
- // A monomorphic miss (i.e., here the cache is not uninitialized) goes
- // megamorphic.
- __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
- __ j(equal, &initialize, Label::kNear);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ jmp(&done, Label::kNear);
-
- // An uninitialized cache is patched with the function.
- __ bind(&initialize);
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
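
The helper above keeps a per-call-site cache cell that moves through three states: uninitialized, monomorphic (holding the one function seen so far), and megamorphic once a second function shows up. A compact model of those transitions, with names that are illustrative rather than V8's:

    #include <cstdio>

    struct CallTargetCell {
      enum State { kUninitialized, kMonomorphic, kMegamorphic } state = kUninitialized;
      const void* target = nullptr;

      void Record(const void* callee) {
        switch (state) {
          case kUninitialized:        // First call: become monomorphic.
            state = kMonomorphic;
            target = callee;
            break;
          case kMonomorphic:          // A miss goes megamorphic for good.
            if (target != callee) state = kMegamorphic;
            break;
          case kMegamorphic:          // Stays megamorphic.
            break;
        }
      }
    };

    int main() {
      CallTargetCell cell;
      int f, g;
      cell.Record(&f);  // Monomorphic on f.
      cell.Record(&f);  // Hit: unchanged.
      cell.Record(&g);  // Miss: megamorphic.
      std::printf("%d\n", cell.state == CallTargetCell::kMegamorphic);  // 1
      return 0;
    }
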
-
-
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // rbx : cache cell for call target
- // rdi : the function to call
- ASSERT(FLAG_optimize_constructed_arrays);
- Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function;
-
- // Load the cache state into rcx.
- __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
-
- // A monomorphic cache hit or an already megamorphic state: invoke the
- // function without changing the state.
- __ cmpq(rcx, rdi);
- __ j(equal, &done);
- __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ j(equal, &done);
-
- // Special handling of the Array() function, which caches not only the
- // monomorphic Array function but also the initial ElementsKind with
- // special sentinels.
- Handle<Object> terminal_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- LAST_FAST_ELEMENTS_KIND);
- __ Cmp(rcx, terminal_kind_sentinel);
- __ j(not_equal, &miss);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
-
- __ bind(&miss);
-
- // A monomorphic miss (i.e., here the cache is not uninitialized) goes
- // megamorphic.
- __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
- __ j(equal, &initialize);
- // MegamorphicSentinel is an immortal immovable object (undefined) so no
- // write-barrier is needed.
- __ bind(&megamorphic);
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
- __ jmp(&done, Label::kNear);
-
- // An uninitialized cache is patched with the function or a sentinel to
- // indicate the ElementsKind if the function is the Array constructor.
- __ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor; install a sentinel value in
- // the constructor's type info cell that will track the initial ElementsKind
- // that should be used for the array when it's constructed.
- Handle<Object> initial_kind_sentinel =
- TypeFeedbackCells::MonomorphicArraySentinel(isolate,
- GetInitialFastElementsKind());
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- initial_kind_sentinel);
- __ jmp(&done);
-
- __ bind(&not_array_function);
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
- // No need for a write barrier here - cells are rescanned.
-
- __ bind(&done);
-}
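// Illustrative sketch (not part of the original file): the cache-cell state
// machine that GenerateRecordCallTarget emits above, written as plain C++.
// The type and member names below are hypothetical, not V8 API, and the
// Array() / ElementsKind sentinel special case is omitted.
enum class CallTargetState { kUninitialized, kMonomorphic, kMegamorphic };

struct CallTargetCell {
  CallTargetState state = CallTargetState::kUninitialized;
  const void* target = nullptr;  // the cached function when monomorphic

  void Record(const void* callee) {
    switch (state) {
      case CallTargetState::kUninitialized:
        // First call seen: patch the cell with the function (monomorphic).
        state = CallTargetState::kMonomorphic;
        target = callee;
        break;
      case CallTargetState::kMonomorphic:
        // A different callee is a monomorphic miss: go megamorphic.
        if (target != callee) {
          state = CallTargetState::kMegamorphic;
          target = nullptr;
        }
        break;
      case CallTargetState::kMegamorphic:
        break;  // Already megamorphic: nothing more to record.
    }
  }
};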
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- // rbx : cache cell for call target
- // rdi : the function to call
- Isolate* isolate = masm->isolate();
- Label slow, non_function;
-
- // The receiver might implicitly be the global object. This is
- // indicated by passing the hole as the receiver to the call
- // function stub.
- if (ReceiverMightBeImplicit()) {
- Label call;
- // Get the receiver from the stack.
- // +1 ~ return address
- __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
- // Call as function is indicated with the hole.
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &call, Label::kNear);
- // Patch the receiver on the stack with the global receiver object.
- __ movq(rcx, GlobalObjectOperand());
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
- __ bind(&call);
- }
-
- // Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &non_function);
- // Go to the slow case if we do not have a function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
-
- if (ReceiverMightBeImplicit()) {
- Label call_as_function;
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(equal, &call_as_function);
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_METHOD);
- __ bind(&call_as_function);
- }
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- CALL_AS_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- if (RecordCallTarget()) {
- // If there is a call target cache, mark it megamorphic in the
- // non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
- }
- // Check for function proxy.
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function);
- __ pop(rcx);
- __ push(rdi); // put proxy as additional argument under return address
- __ push(rcx);
- __ Set(rax, argc_ + 1);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(&non_function);
- __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
- __ Set(rax, argc_);
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor =
- Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-void CallConstructStub::Generate(MacroAssembler* masm) {
- // rax : number of arguments
- // rbx : cache cell for call target
- // rdi : constructor function
- Label slow, non_function_call;
-
- // Check that function is not a smi.
- __ JumpIfSmi(rdi, &non_function_call);
- // Check that function is a JSFunction.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- if (RecordCallTarget()) {
- if (FLAG_optimize_constructed_arrays) {
- GenerateRecordCallTarget(masm);
- } else {
- GenerateRecordCallTargetNoArray(masm);
- }
- }
-
- // Jump to the function-specific construct stub.
- Register jmp_reg = FLAG_optimize_constructed_arrays ? rcx : rbx;
- __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(jmp_reg, FieldOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
- __ jmp(jmp_reg);
-
- // rdi: called object
- // rax: number of arguments
- // rcx: object map
- Label do_call;
- __ bind(&slow);
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function_call);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing rax).
- __ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-bool CEntryStub::NeedsImmovableCode() {
- return false;
-}
-
-
-bool CEntryStub::IsPregenerated() {
-#ifdef _WIN64
- return result_size_ == 1;
-#else
- return true;
-#endif
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
- CEntryStub::GenerateAheadOfTime(isolate);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
- // It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
-}
-
-
-void CodeStub::GenerateFPStubs(Isolate* isolate) {
-}
-
-
-void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
- CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- CEntryStub save_doubles(1, kSaveFPRegs);
- save_doubles.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- __ movq(scratch, value);
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, Immediate(0xf));
- __ cmpq(scratch, Immediate(0xf));
- __ j(equal, oom_label);
-}
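// Illustrative sketch (not part of the original file): the test JumpIfOOM
// emits, in plain C++. Per the STATIC_ASSERTs above, kFailureTag is 3 (a
// two-bit tag) and OUT_OF_MEMORY_EXCEPTION is failure type 3, so an
// out-of-memory failure is exactly a word whose low nibble is 0xf.
#include <cstdint>

inline bool IsOutOfMemoryFailure(uint64_t value) {
  return (value & 0xf) == 0xf;  // failure tag 0b11 plus failure type 0b11
}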
-
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate_scope) {
- // rax: result parameter for PerformGC, if any.
- // rbx: pointer to C function (C callee-saved).
- // rbp: frame pointer (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r15: pointer to the first argument (C callee-saved).
- // This pointer is reused in LeaveExitFrame(), so it is stored in a
- // callee-saved register.
-
- // Simple results returned in rax (both AMD64 and Win64 calling conventions).
- // Complex results must be written to address passed as first argument.
- // AMD64 calling convention: a struct of two pointers in rax+rdx
-
- // Check stack alignment.
- if (FLAG_debug_code) {
- __ CheckStackAlignment();
- }
-
- if (do_gc) {
- // Pass failure code returned from last attempt as first argument to
- // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
- // stack is known to be aligned. This function takes one argument which is
- // passed in register.
-#ifdef _WIN64
- __ movq(rcx, rax);
-#else // _WIN64
- __ movq(rdi, rax);
-#endif
- __ movq(kScratchRegister,
- ExternalReference::perform_gc_function(masm->isolate()));
- __ call(kScratchRegister);
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
- if (always_allocate_scope) {
- Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
- __ incl(scope_depth_operand);
- }
-
- // Call C function.
-#ifdef _WIN64
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
- // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
- __ movq(StackSpaceOperand(0), r14); // argc.
- __ movq(StackSpaceOperand(1), r15); // argv.
- if (result_size_ < 2) {
- // Pass a pointer to the Arguments object as the first argument.
- // Return result in single register (rax).
- __ lea(rcx, StackSpaceOperand(0));
- __ LoadAddress(rdx, ExternalReference::isolate_address());
- } else {
- ASSERT_EQ(2, result_size_);
- // Pass a pointer to the result location as the first argument.
- __ lea(rcx, StackSpaceOperand(2));
- // Pass a pointer to the Arguments object as the second argument.
- __ lea(rdx, StackSpaceOperand(0));
- __ LoadAddress(r8, ExternalReference::isolate_address());
- }
-
-#else // _WIN64
- // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- __ movq(rdi, r14); // argc.
- __ movq(rsi, r15); // argv.
- __ movq(rdx, ExternalReference::isolate_address());
-#endif
- __ call(rbx);
- // Result is in rax - do not destroy this register!
-
- if (always_allocate_scope) {
- Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
- __ decl(scope_depth_operand);
- }
-
- // Check for failure result.
- Label failure_returned;
- STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
-#ifdef _WIN64
- // If return value is on the stack, pop it to registers.
- if (result_size_ > 1) {
- ASSERT_EQ(2, result_size_);
- // Read result values stored on stack. Result is stored
- // above the four argument mirror slots and the two
- // Arguments object slots.
- __ movq(rax, Operand(rsp, 6 * kPointerSize));
- __ movq(rdx, Operand(rsp, 7 * kPointerSize));
- }
-#endif
- __ lea(rcx, Operand(rax, 1));
- // Lower 2 bits of rcx are 0 iff rax has failure tag.
- __ testl(rcx, Immediate(kFailureTagMask));
- __ j(zero, &failure_returned);
-
- // Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles_);
- __ ret(0);
-
- // Handling of failure.
- __ bind(&failure_returned);
-
- Label retry;
- // If the returned exception is RETRY_AFTER_GC continue at retry label
- STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
- __ j(zero, &retry, Label::kNear);
-
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
-
- // Retrieve the pending exception and clear the variable.
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, masm->isolate());
- Operand pending_exception_operand =
- masm->ExternalOperand(pending_exception_address);
- __ movq(rax, pending_exception_operand);
- __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ movq(pending_exception_operand, rdx);
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- __ j(equal, throw_termination_exception);
-
- // Handle normal exception.
- __ jmp(throw_normal_exception);
-
- // Retry.
- __ bind(&retry);
-}
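// Illustrative sketch (not part of the original file): the failure check in
// GenerateCore above ("lea rcx, [rax + 1]; test rcx, kFailureTagMask"), in
// plain C++. Assuming the two-bit failure tag (kFailureTag == 3), a failure
// word has both low bits set, so adding one clears them if and only if the
// word is a failure.
#include <cstdint>

inline bool IsFailureResult(uint64_t value) {
  const uint64_t kTwoBitFailureTagMask = 0x3;
  return ((value + 1) & kTwoBitFailureTagMask) == 0;
}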
-
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // rax: number of arguments including receiver
- // rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer of calling JS frame (restored after C call)
- // rsp: stack pointer (restored after C call)
- // rsi: current context (restored)
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Enter the exit frame that transitions from JavaScript to C++.
-#ifdef _WIN64
- int arg_stack_space = (result_size_ < 2 ? 2 : 4);
-#else
- int arg_stack_space = 0;
-#endif
- __ EnterExitFrame(arg_stack_space, save_doubles_);
-
- // rax: Holds the context at this point, but should not be used.
- // On entry to code generated by GenerateCore, it must hold
- // a failure result if the collect_garbage argument to GenerateCore
- // is true. This failure result can be the result of code
- // generated by a previous call to GenerateCore. The value
- // of rax is then passed to Runtime::PerformGC.
- // rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer of exit frame (restored after C call).
- // rsp: stack pointer (restored after C call).
- // r14: number of arguments including receiver (C callee-saved).
- // r15: argv pointer (C callee-saved).
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE64);
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ Set(rax, static_cast<int64_t>(false));
- __ Store(external_caught, rax);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- Label already_have_failure;
- JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
- __ movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64);
- __ bind(&already_have_failure);
- __ Store(pending_exception, rax);
- // Fall through to the next label.
-
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(rax);
-
- __ bind(&throw_normal_exception);
- __ Throw(rax);
-}
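// Illustrative sketch (not part of the original file): the three-attempt retry
// policy wired up above, as plain C++ over hypothetical run_builtin /
// collect_garbage hooks. The real stub re-enters GenerateCore with
// (do_gc, always_allocate_scope) set to (false, false), (true, false) and
// finally (true, true).
#include <functional>

template <typename Result>
Result CallRuntimeWithRetries(
    const std::function<Result()>& run_builtin,
    const std::function<void()>& collect_garbage,
    const std::function<bool(const Result&)>& needs_retry) {
  Result result = run_builtin();   // first attempt: no GC
  if (!needs_retry(result)) return result;
  collect_garbage();               // space-specific GC, then retry
  result = run_builtin();
  if (!needs_retry(result)) return result;
  collect_garbage();               // full GC, final attempt
  return run_builtin();            // (with the always-allocate scope active)
}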
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, handler_entry, exit;
- Label not_outermost_js, not_outermost_js_2;
- { // NOLINT. Scope block confuses linter.
- MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
- // Set up frame.
- __ push(rbp);
- __ movq(rbp, rsp);
-
- // Push the stack frame type marker twice.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Scratch register is neither callee-save, nor an argument register on any
- // platform. It's free to use at this point.
- // Cannot use smi-register for loading yet.
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE64);
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
-#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
-#endif
- __ push(rbx);
- // TODO(X64): On Win64, if we ever use XMM6-XMM15, their low 64 bits are
- // callee-saved as well.
-
- // Set up the roots and smi constant registers.
- // Needs to be done before any further smi loads.
- __ InitializeSmiConstantRegister();
- __ InitializeRootRegister();
- }
-
- Isolate* isolate = masm->isolate();
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
- {
- Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ push(c_entry_fp_operand);
- }
-
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
- __ Load(rax, js_entry_sp);
- __ testq(rax, rax);
- __ j(not_zero, &not_outermost_js);
- __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ movq(rax, rbp);
- __ Store(js_entry_sp, rax);
- Label cont;
- __ jmp(&cont);
- __ bind(&not_outermost_js);
- __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
- __ bind(&cont);
-
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
- __ bind(&handler_entry);
- handler_offset_ = handler_entry.pos();
- // Caught exception: Store result (exception) in the pending exception
- // field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- __ Store(pending_exception, rax);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE64);
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
- __ bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
-
- // Clear any pending exceptions.
- __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
- __ Store(pending_exception, rax);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline builtin and
- // pop the faked function when we return. We load the address from an
- // external reference instead of inlining the call target address directly
- // in the code, because the builtin stubs may not have been generated yet
- // at the time this code is generated.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
- isolate);
- __ Load(rax, construct_entry);
- } else {
- ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
- __ Load(rax, entry);
- }
- __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
- __ call(kScratchRegister);
-
- // Unlink this frame from the handler chain.
- __ PopTryHandler();
-
- __ bind(&exit);
- // Check if the current stack frame is marked as the outermost JS frame.
- __ pop(rbx);
- __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
- __ j(not_equal, &not_outermost_js_2);
- __ movq(kScratchRegister, js_entry_sp);
- __ movq(Operand(kScratchRegister, 0), Immediate(0));
- __ bind(&not_outermost_js_2);
-
- // Restore the top frame descriptor from the stack.
- { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ pop(c_entry_fp_operand);
- }
-
- // Restore callee-saved registers (X64 conventions).
- __ pop(rbx);
-#ifdef _WIN64
- // Callee-saved in the Win64 ABI, argument/volatile registers in the AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
-#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
- __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(rbp);
- __ ret(0);
-}
-
-
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Implements "value instanceof function" operator.
- // Expected input state with no inline cache:
- // rsp[0] : return address
- // rsp[1] : function pointer
- // rsp[2] : value
- // Expected input state with an inline one-element cache:
- // rsp[0] : return address
- // rsp[1] : offset from return address to location of inline cache
- // rsp[2] : function pointer
- // rsp[3] : value
- // Returns a bitwise zero to indicate that the value
- // is an instance of the function and anything else to
- // indicate that the value is not an instance.
-
- static const int kOffsetToMapCheckValue = 2;
- static const int kOffsetToResultValue = 18;
- // The last 4 bytes of the instruction sequence
- // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
- // Move(kScratchRegister, FACTORY->the_hole_value())
- // in front of the hole value address.
- static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
- // The last 4 bytes of the instruction sequence
- // __ j(not_equal, &cache_miss);
- // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
- // before the offset of the hole value in the root array.
- static const unsigned int kWordBeforeResultValue = 0x458B4909;
- // Only the inline check flag is supported on X64.
- ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
- int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
-
- // Get the object - go slow case if it's a smi.
- Label slow;
-
- __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
- __ JumpIfSmi(rax, &slow);
-
- // Check that the left hand is a JS object. Leave its map in rax.
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
- __ j(below, &slow);
- __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Get the prototype of the function.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
- // rdx is function, rax is map.
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck()) {
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss, Label::kNear);
- __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss, Label::kNear);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(2 * kPointerSize);
- __ bind(&miss);
- }
-
- __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(rbx, &slow);
- __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
- __ j(below, &slow);
- __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Register mapping:
- // rax is object map.
- // rdx is function.
- // rbx is function prototype.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
- } else {
- // Get return address and delta to inlined map check.
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
- __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
- __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
- }
- __ movq(kScratchRegister,
- Operand(kScratchRegister, kOffsetToMapCheckValue));
- __ movq(Operand(kScratchRegister, 0), rax);
- }
-
- __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
-
- // Loop through the prototype chain looking for the function prototype.
- Label loop, is_instance, is_not_instance;
- __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
- __ bind(&loop);
- __ cmpq(rcx, rbx);
- __ j(equal, &is_instance, Label::kNear);
- __ cmpq(rcx, kScratchRegister);
- // The code at is_not_instance assumes that kScratchRegister contains a
- // non-zero GCable value (the null object in this case).
- __ j(equal, &is_not_instance, Label::kNear);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ xorl(rax, rax);
- // Store bitwise zero in the cache. This is a Smi in GC terms.
- STATIC_ASSERT(kSmiTag == 0);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Store offset of true in the root array at the inline check site.
- int true_offset = 0x100 +
- (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
- // Assert it is a 1-byte signed value.
- ASSERT(true_offset >= 0 && true_offset < 0x100);
- __ movl(rax, Immediate(true_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
- if (FLAG_debug_code) {
- __ movl(rax, Immediate(kWordBeforeResultValue));
- __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
- }
- __ Set(rax, 0);
- }
- __ ret(2 * kPointerSize + extra_stack_space);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- // We have to store a non-zero value in the cache.
- __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
- } else {
- // Store offset of false in the root array at the inline check site.
- int false_offset = 0x100 +
- (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
- // Assert it is a 1-byte signed value.
- ASSERT(false_offset >= 0 && false_offset < 0x100);
- __ movl(rax, Immediate(false_offset));
- __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
- __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
- __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
- if (FLAG_debug_code) {
- __ movl(rax, Immediate(kWordBeforeResultValue));
- __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
- __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
- }
- }
- __ ret(2 * kPointerSize + extra_stack_space);
-
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- if (HasCallSiteInlineCheck()) {
- // Remove extra value from the stack.
- __ pop(rcx);
- __ pop(rax);
- __ push(rcx);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
-}
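// Illustrative sketch (not part of the original file): the prototype-chain
// walk the stub emits above, over a hypothetical Object type. Matching the
// stub, the walk starts from the prototype recorded in the value's map and
// stops at null; "instance" means the function's prototype was found on the
// chain.
struct Object {
  const Object* prototype = nullptr;  // null terminates the chain
};

inline bool IsInstanceOf(const Object* value_prototype,
                         const Object* function_prototype) {
  for (const Object* p = value_prototype; p != nullptr; p = p->prototype) {
    if (p == function_prototype) return true;  // found: value is an instance
  }
  return false;                                // hit null: not an instance
}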
-
-
-// Passing arguments in registers is not supported.
-Register InstanceofStub::left() { return no_reg; }
-
-
-Register InstanceofStub::right() { return no_reg; }
-
-
-// -------------------------------------------------------------------------
-// StringCharCodeAtGenerator
-
-void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
- Label flat_string;
- Label ascii_string;
- Label got_char_code;
- Label sliced_string;
-
- // If the receiver is a smi trigger the non-string case.
- __ JumpIfSmi(object_, receiver_not_string_);
-
- // Fetch the instance type of the receiver into result register.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the receiver is not a string trigger the non-string case.
- __ testb(result_, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string_);
-
- // If the index is non-smi trigger the non-smi case.
- __ JumpIfNotSmi(index_, &index_not_smi_);
- __ bind(&got_smi_index_);
-
- // Check for index out of range.
- __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
- __ j(above_equal, index_out_of_range_);
-
- __ SmiToInteger32(index_, index_);
-
- StringCharLoadGenerator::Generate(
- masm, object_, index_, result_, &call_runtime_);
-
- __ Integer32ToSmi(result_, result_);
- __ bind(&exit_);
-}
-
-
-void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharCodeAt slow case");
-
- Factory* factory = masm->isolate()->factory();
- // Index is not a smi.
- __ bind(&index_not_smi_);
- // If index is a heap number, try converting it to an integer.
- __ CheckMap(index_,
- factory->heap_number_map(),
- index_not_number_,
- DONT_DO_SMI_CHECK);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
- } else {
- ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
- }
- if (!index_.is(rax)) {
- // Save the conversion result before the pop instructions below
- // have a chance to overwrite it.
- __ movq(index_, rax);
- }
- __ pop(object_);
- // Reload the instance type.
- __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
- __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- call_helper.AfterCall(masm);
- // If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(index_, index_out_of_range_);
- // Otherwise, return to the fast path.
- __ jmp(&got_smi_index_);
-
- // Call runtime. We get here when the receiver is a string and the
- // index is a number, but the code of getting the actual character
- // is too complex (e.g., when the string needs to be flattened).
- __ bind(&call_runtime_);
- call_helper.BeforeCall(masm);
- __ push(object_);
- __ Integer32ToSmi(index_, index_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- if (!result_.is(rax)) {
- __ movq(result_, rax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharCodeAt slow case");
-}
-
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- __ JumpIfNotSmi(code_, &slow_case_);
- __ SmiCompare(code_, Smi::FromInt(String::kMaxOneByteCharCode));
- __ j(above, &slow_case_);
-
- __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
- SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
- __ movq(result_, FieldOperand(result_, index.reg, index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case_);
- __ bind(&exit_);
-}
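// Illustrative sketch (not part of the original file): the fast path above.
// The cache container and the nullptr-for-miss convention are hypothetical;
// in the stub the cache is the single character string cache root array and a
// miss (undefined entry) falls through to the slow case.
#include <cstdint>
#include <string>
#include <unordered_map>

inline const std::string* LookupSingleCharacterString(
    const std::unordered_map<uint16_t, std::string>& cache, uint32_t code) {
  const uint32_t kMaxOneByteCharCode = 0xff;       // assumed one-byte limit
  if (code > kMaxOneByteCharCode) return nullptr;  // slow case
  auto it = cache.find(static_cast<uint16_t>(code));
  return it == cache.end() ? nullptr : &it->second;  // miss -> slow case
}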
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- __ Abort("Unexpected fallthrough to CharFromCode slow case");
-
- __ bind(&slow_case_);
- call_helper.BeforeCall(masm);
- __ push(code_);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result_.is(rax)) {
- __ movq(result_, rax);
- }
- call_helper.AfterCall(masm);
- __ jmp(&exit_);
-
- __ Abort("Unexpected fallthrough from CharFromCode slow case");
-}
-
-
-void StringAddStub::Generate(MacroAssembler* masm) {
- Label call_runtime, call_builtin;
- Builtins::JavaScript builtin_id = Builtins::ADD;
-
- // Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
-
- // Make sure that both arguments are strings if not known in advance.
- if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfSmi(rax, &call_runtime);
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &call_runtime);
-
- // First argument is a string; test the second.
- __ JumpIfSmi(rdx, &call_runtime);
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &call_runtime);
- } else {
- // Here at least one of the arguments is definitely a string.
- // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_RIGHT;
- } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
- ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
- GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
- &call_builtin);
- builtin_id = Builtins::STRING_ADD_LEFT;
- }
- }
-
- // Both arguments are strings.
- // rax: first string
- // rdx: second string
- // Check if either of the strings are empty. In that case return the other.
- Label second_not_zero_length, both_not_zero_length;
- __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
- __ SmiTest(rcx);
- __ j(not_zero, &second_not_zero_length, Label::kNear);
- // Second string is empty, result is first string which is already in rax.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&second_not_zero_length);
- __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rbx);
- __ j(not_zero, &both_not_zero_length, Label::kNear);
- // First string is empty, result is second string which is in rdx.
- __ movq(rax, rdx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Both strings are non-empty.
- // rax: first string
- // rbx: length of first string
- // rcx: length of second string
- // rdx: second string
- // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
- // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
- Label string_add_flat_result, longer_than_two;
- __ bind(&both_not_zero_length);
-
- // If the arguments were known to be strings, their maps were not loaded into
- // r8 and r9 by the code above.
- if (flags_ != NO_STRING_ADD_FLAGS) {
- __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- }
- // Get the instance types of the two strings as they will be needed soon.
- __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
- __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
-
- // Look at the length of the result of adding the two strings.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
- __ SmiAdd(rbx, rbx, rcx);
- // Use the string table when adding two one character strings, as it
- // helps later optimizations to return an internalized string here.
- __ SmiCompare(rbx, Smi::FromInt(2));
- __ j(not_equal, &longer_than_two);
-
- // Check that both strings are non-external ASCII strings.
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &call_runtime);
-
- // Get the two characters forming the sub string.
- __ movzxbq(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
-
- // Try to lookup two character string in string table. If it is not found
- // just allocate a new one.
- Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&make_two_character_string);
- __ Set(rdi, 2);
- __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
- // rbx - first byte: first character
- // rbx - second byte: *maybe* second character
- // Make sure that the second byte of rbx contains the second character.
- __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
- __ shll(rcx, Immediate(kBitsPerByte));
- __ orl(rbx, rcx);
- // Write both characters to the new string.
- __ movw(FieldOperand(rax, SeqOneByteString::kHeaderSize), rbx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&longer_than_two);
- // Check if resulting string will be flat.
- __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
- __ j(below, &string_add_flat_result);
- // Handle exceptionally long strings in the runtime system.
- STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
- __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
- __ j(above, &call_runtime);
-
- // If result is not supposed to be flat, allocate a cons string object. If
- // both strings are ASCII the result is an ASCII cons string.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label non_ascii, allocated, ascii_data;
- __ movl(rcx, r8);
- __ and_(rcx, r9);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testl(rcx, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii);
- __ bind(&ascii_data);
- // Allocate an ASCII cons string.
- __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
- __ bind(&allocated);
- // Fill the fields of the cons string.
- __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
- __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
- __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
- __ movq(rax, rcx);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
- __ bind(&non_ascii);
- // At least one of the strings is two-byte. Check whether it happens
- // to contain only ASCII characters.
- // rcx: first instance type AND second instance type.
- // r8: first instance type.
- // r9: second instance type.
- __ testb(rcx, Immediate(kAsciiDataHintMask));
- __ j(not_zero, &ascii_data);
- __ xor_(r8, r9);
- STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
- __ andb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag));
- __ cmpb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag));
- __ j(equal, &ascii_data);
- // Allocate a two byte cons string.
- __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
- __ jmp(&allocated);
-
- // We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
- // Handle creating a flat result from either external or sequential strings.
- // Locate the first characters' locations.
- // rax: first string
- // rbx: length of resulting flat string as smi
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
- Label first_prepared, second_prepared;
- Label first_is_sequential, second_is_sequential;
- __ bind(&string_add_flat_result);
-
- __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
- // r14: length of first string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(r8, Immediate(kStringRepresentationMask));
- __ j(zero, &first_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ testb(r8, Immediate(kShortExternalStringMask));
- __ j(not_zero, &call_runtime);
- __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
- __ jmp(&first_prepared, Label::kNear);
- __ bind(&first_is_sequential);
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rcx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- __ bind(&first_prepared);
-
- // Check whether both strings have same encoding.
- __ xorl(r8, r9);
- __ testb(r8, Immediate(kStringEncodingMask));
- __ j(not_zero, &call_runtime);
-
- __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
- // r15: length of second string
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(r9, Immediate(kStringRepresentationMask));
- __ j(zero, &second_is_sequential, Label::kNear);
- // Rule out short external string and load string resource.
- STATIC_ASSERT(kShortExternalStringTag != 0);
- __ testb(r9, Immediate(kShortExternalStringMask));
- __ j(not_zero, &call_runtime);
- __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
- __ jmp(&second_prepared, Label::kNear);
- __ bind(&second_is_sequential);
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rdx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
- __ bind(&second_prepared);
-
- Label non_ascii_string_add_flat_result;
- // r9: instance type of second string
- // First string and second string have the same encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ SmiToInteger32(rbx, rbx);
- __ testb(r9, Immediate(kStringEncodingMask));
- __ j(zero, &non_ascii_string_add_flat_result);
-
- __ bind(&make_flat_ascii_string);
- // Both strings are ASCII strings. As they are short they are both flat.
- __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
- // rax: result string
- // Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- // rcx: first char of first string
- // rbx: first character of result
- // r14: length of first string
- StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
- // rbx: next character of result
- // rdx: first char of second string
- // r15: length of second string
- StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&non_ascii_string_add_flat_result);
- // Both strings are two-byte strings. As they are short they are both flat.
- __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
- // rax: result string
- // Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
- // rcx: first char of first string
- // rbx: first character of result
- // r14: length of first string
- StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
- // rbx: next character of result
- // rdx: first char of second string
- // r15: length of second string
- StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
- __ IncrementCounter(counters->string_add_native(), 1);
- __ ret(2 * kPointerSize);
-
- // Just jump to runtime to add the two strings.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-
- if (call_builtin.is_linked()) {
- __ bind(&call_builtin);
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
-}
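// Illustrative sketch (not part of the original file): the length-based
// dispatch StringAddStub::Generate performs above, as plain C++. The enum and
// function names are hypothetical; the thresholds mirror the comparisons in
// the stub (2, ConsString::kMinLength, String::kMaxLength).
#include <cstddef>

enum class AddStrategy {
  kReturnLeft,         // right string is empty
  kReturnRight,        // left string is empty
  kTwoCharacterTable,  // total length 2: probe the string table
  kFlatCopy,           // below the cons-string minimum: copy into a flat string
  kRuntime,            // above the maximum string length
  kConsString          // otherwise: allocate a cons string
};

inline AddStrategy ChooseAddStrategy(std::size_t left_length,
                                     std::size_t right_length,
                                     std::size_t cons_min_length,
                                     std::size_t max_length) {
  if (right_length == 0) return AddStrategy::kReturnLeft;
  if (left_length == 0) return AddStrategy::kReturnRight;
  const std::size_t total = left_length + right_length;
  if (total == 2) return AddStrategy::kTwoCharacterTable;
  if (total < cons_min_length) return AddStrategy::kFlatCopy;
  if (total > max_length) return AddStrategy::kRuntime;
  return AddStrategy::kConsString;
}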
-
-
-void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow) {
- // First check if the argument is already a string.
- Label not_string, done;
- __ JumpIfSmi(arg, &not_string);
- __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
- __ j(below, &done);
-
- // Check the number to string cache.
- Label not_cached;
- __ bind(&not_string);
- // Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- false,
- &not_cached);
- __ movq(arg, scratch1);
- __ movq(Operand(rsp, stack_offset), arg);
- __ jmp(&done);
-
- // Check if the argument is a safe string wrapper.
- __ bind(&not_cached);
- __ JumpIfSmi(arg, slow);
- __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
- __ j(not_equal, slow);
- __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(zero, slow);
- __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
- __ movq(Operand(rsp, stack_offset), arg);
-
- __ bind(&done);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- Label loop;
- __ bind(&loop);
- // This loop just copies one character at a time, as it is only used for very
- // short strings.
- if (ascii) {
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- } else {
- __ movzxwl(kScratchRegister, Operand(src, 0));
- __ movw(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(2));
- __ addq(dest, Immediate(2));
- }
- __ decl(count);
- __ j(not_zero, &loop);
-}
-
-
-void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii) {
- // Copy characters using rep movs of quadwords. Copy the remaining bytes one
- // at a time after running rep movs.
- // Count is positive int32, dest and src are character pointers.
- ASSERT(dest.is(rdi)); // rep movs destination
- ASSERT(src.is(rsi)); // rep movs source
- ASSERT(count.is(rcx)); // rep movs count
-
- // Nothing to do for zero characters.
- Label done;
- __ testl(count, count);
- __ j(zero, &done, Label::kNear);
-
- // Make count the number of bytes to copy.
- if (!ascii) {
- STATIC_ASSERT(2 == sizeof(uc16));
- __ addl(count, count);
- }
-
- // Don't enter the rep movs if there are fewer than 8 bytes to copy.
- Label last_bytes;
- __ testl(count, Immediate(~7));
- __ j(zero, &last_bytes, Label::kNear);
-
- // Copy from rsi to rdi using the rep movs instruction.
- __ movl(kScratchRegister, count);
- __ shr(count, Immediate(3)); // Number of quadwords to copy.
- __ repmovsq();
-
- // Find number of bytes left.
- __ movl(count, kScratchRegister);
- __ and_(count, Immediate(7));
-
- // Check if there are more bytes to copy.
- __ bind(&last_bytes);
- __ testl(count, count);
- __ j(zero, &done, Label::kNear);
-
- // Copy remaining characters.
- Label loop;
- __ bind(&loop);
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
- __ decl(count);
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
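// Illustrative sketch (not part of the original file): the copy strategy of
// GenerateCopyCharactersREP above in plain C++ -- widen the count to bytes for
// two-byte strings, move whole 8-byte words first, then finish the tail one
// byte at a time.
#include <cstddef>
#include <cstring>

inline void CopyCharacters(char* dest, const char* src, std::size_t count,
                           bool one_byte) {
  std::size_t bytes = one_byte ? count : count * 2;  // two-byte chars double the size
  std::size_t i = 0;
  for (; i + 8 <= bytes; i += 8) {
    std::memcpy(dest + i, src + i, 8);  // bulk copy, like rep movsq
  }
  for (; i < bytes; ++i) {
    dest[i] = src[i];  // remaining tail bytes
  }
}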
-
-void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found) {
- // Register scratch3 is the general scratch register in this function.
- Register scratch = scratch3;
-
- // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the string table.
- Label not_array_index;
- __ leal(scratch, Operand(c1, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(above, &not_array_index, Label::kNear);
- __ leal(scratch, Operand(c2, -'0'));
- __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
- __ j(below_equal, not_found);
-
- __ bind(&not_array_index);
- // Calculate the two character string hash.
- Register hash = scratch1;
- GenerateHashInit(masm, hash, c1, scratch);
- GenerateHashAddCharacter(masm, hash, c2, scratch);
- GenerateHashGetHash(masm, hash, scratch);
-
- // Collect the two characters in a register.
- Register chars = c1;
- __ shl(c2, Immediate(kBitsPerByte));
- __ orl(chars, c2);
-
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string.
-
- // Load the string table.
- Register string_table = c2;
- __ LoadRoot(string_table, Heap::kStringTableRootIndex);
-
- // Calculate capacity mask from the string table capacity.
- Register mask = scratch2;
- __ SmiToInteger32(mask,
- FieldOperand(string_table, StringTable::kCapacityOffset));
- __ decl(mask);
-
- Register map = scratch4;
-
- // Registers
- // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
- // hash: hash of two character string (32-bit int)
- // string_table: string table
- // mask: capacity mask (32-bit int)
- // map: -
- // scratch: -
-
- // Perform a number of probes in the string table.
- static const int kProbes = 4;
- Label found_in_string_table;
- Label next_probe[kProbes];
- Register candidate = scratch; // Scratch register contains candidate.
- for (int i = 0; i < kProbes; i++) {
- // Calculate entry in string table.
- __ movl(scratch, hash);
- if (i > 0) {
- __ addl(scratch, Immediate(StringTable::GetProbeOffset(i)));
- }
- __ andl(scratch, mask);
-
- // Load the entry from the string table.
- STATIC_ASSERT(StringTable::kEntrySize == 1);
- __ movq(candidate,
- FieldOperand(string_table,
- scratch,
- times_pointer_size,
- StringTable::kElementsStartOffset));
-
- // If entry is undefined no string with this hash can be found.
- Label is_string;
- __ CmpObjectType(candidate, ODDBALL_TYPE, map);
- __ j(not_equal, &is_string, Label::kNear);
-
- __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
- __ j(equal, not_found);
- // Must be the hole (deleted entry).
- if (FLAG_debug_code) {
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ cmpq(kScratchRegister, candidate);
- __ Assert(equal, "oddball in string table is not undefined or the hole");
- }
- __ jmp(&next_probe[i]);
-
- __ bind(&is_string);
-
- // If length is not 2 the string is not a candidate.
- __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
- Smi::FromInt(2));
- __ j(not_equal, &next_probe[i]);
-
- // We use kScratchRegister as a temporary register on the assumption that
- // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
- Register temp = kScratchRegister;
-
- // Check that the candidate is a non-external ASCII string.
- __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(
- temp, temp, &next_probe[i]);
-
- // Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
- __ andl(temp, Immediate(0x0000ffff));
- __ cmpl(chars, temp);
- __ j(equal, &found_in_string_table);
- __ bind(&next_probe[i]);
- }
-
- // No matching 2 character string found by probing.
- __ jmp(not_found);
-
- // Scratch register contains result when we fall through to here.
- Register result = candidate;
- __ bind(&found_in_string_table);
- if (!result.is(rax)) {
- __ movq(rax, result);
- }
-}
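// Illustrative sketch (not part of the original file): the probe loop above
// over a hypothetical open-addressed table of string pointers. An empty slot
// stands in for the undefined sentinel; deleted-entry (hole) handling and the
// encoding checks are omitted, and the quadratic probe offsets are an
// assumption about StringTable::GetProbeOffset.
#include <cstdint>
#include <string>
#include <vector>

inline const std::string* ProbeTwoCharacterString(
    const std::vector<const std::string*>& table,  // size is a power of two
    uint32_t hash, char c1, char c2) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
  const int kProbes = 4;  // as in the stub
  for (int i = 0; i < kProbes; ++i) {
    uint32_t index = (hash + static_cast<uint32_t>(i * (i + 1) / 2)) & mask;
    const std::string* candidate = table[index];
    if (candidate == nullptr) return nullptr;  // undefined: cannot be present
    if (candidate->size() == 2 && (*candidate)[0] == c1 && (*candidate)[1] == c2) {
      return candidate;                        // existing two-character string
    }
  }
  return nullptr;                              // not found: caller allocates
}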
-
-
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash = (seed + character) + ((seed + character) << 10);
- __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
- __ SmiToInteger32(scratch, scratch);
- __ addl(scratch, character);
- __ movl(hash, scratch);
- __ shll(scratch, Immediate(10));
- __ addl(hash, scratch);
- // hash ^= hash >> 6;
- __ movl(scratch, hash);
- __ shrl(scratch, Immediate(6));
- __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch) {
- // hash += character;
- __ addl(hash, character);
- // hash += hash << 10;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(10));
- __ addl(hash, scratch);
- // hash ^= hash >> 6;
- __ movl(scratch, hash);
- __ shrl(scratch, Immediate(6));
- __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch) {
- // hash += hash << 3;
- __ leal(hash, Operand(hash, hash, times_8, 0));
- // hash ^= hash >> 11;
- __ movl(scratch, hash);
- __ shrl(scratch, Immediate(11));
- __ xorl(hash, scratch);
- // hash += hash << 15;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(15));
- __ addl(hash, scratch);
-
- __ andl(hash, Immediate(String::kHashBitMask));
-
- // if (hash == 0) hash = 27;
- Label hash_not_zero;
- __ j(not_zero, &hash_not_zero);
- __ Set(hash, StringHasher::kZeroHash);
- __ bind(&hash_not_zero);
-}
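// Illustrative sketch (not part of the original file): the hash computed by
// the three helpers above, in plain C++. kHashBitMask and kZeroHash stand in
// for String::kHashBitMask and StringHasher::kZeroHash; the exact constants
// used here are illustrative, the shift/xor steps mirror the emitted code.
#include <cstdint>

inline uint32_t HashInit(uint32_t seed, uint32_t c) {
  uint32_t hash = seed + c;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

inline uint32_t HashAddCharacter(uint32_t hash, uint32_t c) {
  hash += c;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

inline uint32_t HashGetHash(uint32_t hash) {
  const uint32_t kHashBitMask = (1u << 30) - 1;  // illustrative field width
  const uint32_t kZeroHash = 27;                 // illustrative non-zero value
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= kHashBitMask;
  return hash == 0 ? kZeroHash : hash;
}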
-
-void SubStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: to
- // rsp[16]: from
- // rsp[24]: string
-
- const int kToOffset = 1 * kPointerSize;
- const int kFromOffset = kToOffset + kPointerSize;
- const int kStringOffset = kFromOffset + kPointerSize;
- const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
-
- // Make sure first argument is a string.
- __ movq(rax, Operand(rsp, kStringOffset));
- STATIC_ASSERT(kSmiTag == 0);
- __ testl(rax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rax: string
- // rbx: instance type
- // Calculate length of sub string using the smi values.
- __ movq(rcx, Operand(rsp, kToOffset));
- __ movq(rdx, Operand(rsp, kFromOffset));
- __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
-
- __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
- __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
- Label not_original_string;
- // Shorter than original string's length: an actual substring.
- __ j(below, &not_original_string, Label::kNear);
- // Longer than original string's length or negative: unsafe arguments.
- __ j(above, &runtime);
- // Return original string.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
- __ bind(&not_original_string);
-
- Label single_char;
- __ SmiCompare(rcx, Smi::FromInt(1));
- __ j(equal, &single_char);
-
- __ SmiToInteger32(rcx, rcx);
-
- // rax: string
- // rbx: instance type
- // rcx: sub string length
- // rdx: from index (smi)
- // Deal with different string types: update the index if necessary
- // and put the underlying string into rdi.
- Label underlying_unpacked, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ testb(rbx, Immediate(kIsIndirectStringMask));
- __ j(zero, &seq_or_external_string, Label::kNear);
-
- __ testb(rbx, Immediate(kSlicedNotConsMask));
- __ j(not_zero, &sliced_string, Label::kNear);
- // Cons string. Check whether it is flat, then fetch first part.
- // Flat cons strings have an empty second part.
- __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
- Heap::kempty_stringRootIndex);
- __ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
- // Update instance type.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
- // Update instance type.
- __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ jmp(&underlying_unpacked, Label::kNear);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the correct register.
- __ movq(rdi, rax);
-
- __ bind(&underlying_unpacked);
-
- if (FLAG_string_slices) {
- Label copy_routine;
- // rdi: underlying subject string
- // rbx: instance type of underlying subject string
- // rdx: adjusted start index (smi)
- // rcx: length
- // If coming from the make_two_character_string path, the string
- // is too short to be sliced anyways.
- __ cmpq(rcx, Immediate(SlicedString::kMinLength));
- // Short slice. Copy instead of slicing.
- __ j(less, &copy_routine);
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testb(rbx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_slice, Label::kNear);
- __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
- __ jmp(&set_slice_header, Label::kNear);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
- __ bind(&set_slice_header);
- __ Integer32ToSmi(rcx, rcx);
- __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
- __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
- __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
- __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
-
- __ bind(&copy_routine);
- }
-
- // rdi: underlying subject string
- // rbx: instance type of underlying subject string
- // rdx: adjusted start index (smi)
- // rcx: length
- // The subject string can only be an external or a sequential string of either
- // encoding at this point.
- Label two_byte_sequential, sequential_string;
- STATIC_ASSERT(kExternalStringTag != 0);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(rbx, Immediate(kExternalStringTag));
- __ j(zero, &sequential_string);
-
- // Handle external string.
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ testb(rbx, Immediate(kShortExternalStringMask));
- __ j(not_zero, &runtime);
- __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
- // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- __ bind(&sequential_string);
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
- __ testb(rbx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_sequential);
-
- // Allocate the result.
- __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
-
- // rax: result string
- // rcx: result string length
- __ movq(r14, rsi); // rsi is used by the following code.
- { // Locate character of sub string start.
- SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqOneByteString::kHeaderSize - kHeapObjectTag));
- }
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
-
- // rax: result string
- // rcx: result length
- // rdi: first character of result
- // rsi: character of sub string start
- // r14: original value of rsi
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
- __ movq(rsi, r14); // Restore rsi.
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
-
- __ bind(&two_byte_sequential);
- // Allocate the result.
- __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
-
- // rax: result string
- // rcx: result string length
- __ movq(r14, rsi); // rsi is used by the following code.
- { // Locate character of sub string start.
- SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqOneByteString::kHeaderSize - kHeapObjectTag));
- }
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
-
- // rax: result string
- // rcx: result length
- // rdi: first character of result
- // rsi: character of sub string start
- // r14: original value of rsi
- StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
- __ movq(rsi, r14); // Restore rsi.
- __ IncrementCounter(counters->sub_string_native(), 1);
- __ ret(kArgumentsSize);
-
- // Just jump to runtime to create the sub string.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
-
- __ bind(&single_char);
- // rax: string
- // rbx: instance type
- // rcx: sub string length (smi)
- // rdx: from index (smi)
- StringCharAtGenerator generator(
- rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm);
- __ ret(kArgumentsSize);
- generator.SkipSlow(masm, &runtime);
-}
-
-
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2) {
- Register length = scratch1;
-
- // Compare lengths.
- Label check_zero_length;
- __ movq(length, FieldOperand(left, String::kLengthOffset));
- __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
- __ j(equal, &check_zero_length, Label::kNear);
- __ Move(rax, Smi::FromInt(NOT_EQUAL));
- __ ret(0);
-
- // Check if the length is zero.
- Label compare_chars;
- __ bind(&check_zero_length);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTest(length);
- __ j(not_zero, &compare_chars, Label::kNear);
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- // Compare characters.
- __ bind(&compare_chars);
- Label strings_not_equal;
- GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
- &strings_not_equal, Label::kNear);
-
- // Characters are equal.
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- // Characters are not equal.
- __ bind(&strings_not_equal);
- __ Move(rax, Smi::FromInt(NOT_EQUAL));
- __ ret(0);
-}
-
-
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- // Ensure that you can always subtract a string length from a non-negative
- // number (e.g. another length).
- STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
-
- // Find minimum length and length difference.
- __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
- __ movq(scratch4, scratch1);
- __ SmiSub(scratch4,
- scratch4,
- FieldOperand(right, String::kLengthOffset));
- // Register scratch4 now holds left.length - right.length.
- const Register length_difference = scratch4;
- Label left_shorter;
- __ j(less, &left_shorter, Label::kNear);
- // The right string isn't longer than the left one.
- // Get the right string's length by subtracting the (non-negative) difference
- // from the left string's length.
- __ SmiSub(scratch1, scratch1, length_difference);
- __ bind(&left_shorter);
- // Register scratch1 now holds Min(left.length, right.length).
- const Register min_length = scratch1;
-
- Label compare_lengths;
- // If min-length is zero, go directly to comparing lengths.
- __ SmiTest(min_length);
- __ j(zero, &compare_lengths, Label::kNear);
-
- // Compare loop.
- Label result_not_equal;
- GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
- &result_not_equal, Label::kNear);
-
- // Completed loop without finding different characters.
- // Compare lengths (precomputed).
- __ bind(&compare_lengths);
- __ SmiTest(length_difference);
-#ifndef ENABLE_LATIN_1
- __ j(not_zero, &result_not_equal, Label::kNear);
-#else
- Label length_not_equal;
- __ j(not_zero, &length_not_equal, Label::kNear);
-#endif
-
- // Result is EQUAL.
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- Label result_greater;
-#ifdef ENABLE_LATIN_1
- Label result_less;
- __ bind(&length_not_equal);
- __ j(greater, &result_greater, Label::kNear);
- __ jmp(&result_less, Label::kNear);
-#endif
- __ bind(&result_not_equal);
- // Unequal comparison of left to right, either character or length.
-#ifndef ENABLE_LATIN_1
- __ j(greater, &result_greater, Label::kNear);
-#else
- __ j(above, &result_greater, Label::kNear);
- __ bind(&result_less);
-#endif
-
- // Result is LESS.
- __ Move(rax, Smi::FromInt(LESS));
- __ ret(0);
-
- // Result is GREATER.
- __ bind(&result_greater);
- __ Move(rax, Smi::FromInt(GREATER));
- __ ret(0);
-}
-
-
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
- Label::Distance near_jump) {
- // Change index to run from -length to -1 by adding length to string
- // start. This means that loop ends when index reaches zero, which
- // doesn't need an additional compare.
- __ SmiToInteger32(length, length);
- __ lea(left,
- FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
- __ lea(right,
- FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
- Register index = length; // index = -length;
-
- // Compare loop.
- Label loop;
- __ bind(&loop);
- __ movb(scratch, Operand(left, index, times_1, 0));
- __ cmpb(scratch, Operand(right, index, times_1, 0));
- __ j(not_equal, chars_not_equal, near_jump);
- __ incq(index);
- __ j(not_zero, &loop);
-}
-
-
-void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0]: return address
- // rsp[8]: right string
- // rsp[16]: left string
-
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
-
- // Check for identity.
- Label not_same;
- __ cmpq(rdx, rax);
- __ j(not_equal, &not_same, Label::kNear);
- __ Move(rax, Smi::FromInt(EQUAL));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_compare_native(), 1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_same);
-
- // Check that both are sequential ASCII strings.
- __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
-
- // Inline comparison of ASCII strings.
- __ IncrementCounter(counters->string_compare_native(), 1);
- // Drop arguments from the stack
- __ pop(rcx);
- __ addq(rsp, Immediate(2 * kPointerSize));
- __ push(rcx);
- GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
-
- // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-}
-
-
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
- Label miss;
- __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
-
- if (GetCondition() == equal) {
- // For equality we do not care about the sign of the result.
- __ subq(rax, rdx);
- } else {
- Label done;
- __ subq(rdx, rax);
- __ j(no_overflow, &done, Label::kNear);
- // Correct sign of result in case of overflow.
- __ not_(rdx);
- __ bind(&done);
- __ movq(rax, rdx);
- }
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::NUMBER);
-
- Label generic_stub;
- Label unordered, maybe_undefined1, maybe_undefined2;
- Label miss;
-
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(rdx, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(rax, &miss);
- }
-
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(rax, &right_smi, Label::kNear);
- __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&left, Label::kNear);
- __ bind(&right_smi);
- __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
- __ cvtlsi2sd(xmm1, rcx);
-
- __ bind(&left);
- __ JumpIfSmi(rdx, &left_smi, Label::kNear);
- __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&left_smi);
- __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
- __ cvtlsi2sd(xmm0, rcx);
-
- __ bind(&done);
- // Compare operands
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
-
- // Return a result of -1, 0, or 1, based on EFLAGS.
- // Zero the registers with mov, because xor would destroy the flags register.
- __ movl(rax, Immediate(0));
- __ movl(rcx, Immediate(0));
- __ setcc(above, rax); // Add one to zero if carry clear and not equal.
- __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
- __ ret(0);
-
- __ bind(&unordered);
- __ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
- __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
-
- __ bind(&maybe_undefined1);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ Cmp(rax, masm->isolate()->factory()->undefined_value());
- __ j(not_equal, &miss);
- __ JumpIfSmi(rdx, &unordered);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- __ jmp(&unordered);
- }
-
- __ bind(&maybe_undefined2);
- if (Token::IsOrderedRelationalCompareOp(op_)) {
- __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
- __ j(equal, &unordered);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
- ASSERT(GetCondition() == equal);
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
- Register tmp1 = rcx;
- Register tmp2 = rbx;
-
- // Check that both operands are heap objects.
- Label miss;
- Condition cond = masm->CheckEitherSmi(left, right, tmp1);
- __ j(cond, &miss, Label::kNear);
-
- // Check that both operands are internalized strings.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ testb(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &miss, Label::kNear);
-
- // Internalized strings are compared by identity.
- Label done;
- __ cmpq(left, right);
- // Make sure rax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(rax));
- __ j(not_equal, &done, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Move(rax, Smi::FromInt(EQUAL));
- __ bind(&done);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::UNIQUE_NAME);
- ASSERT(GetCondition() == equal);
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
- Register tmp1 = rcx;
- Register tmp2 = rbx;
-
- // Check that both operands are heap objects.
- Label miss;
- Condition cond = masm->CheckEitherSmi(left, right, tmp1);
- __ j(cond, &miss, Label::kNear);
-
- // Check that both operands are unique names. This leaves the instance
- // types loaded in tmp1 and tmp2.
- STATIC_ASSERT(kInternalizedTag != 0);
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
-
- Label succeed1;
- __ testb(tmp1, Immediate(kIsInternalizedMask));
- __ j(not_zero, &succeed1, Label::kNear);
- __ cmpb(tmp1, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
- __ j(not_equal, &miss, Label::kNear);
- __ bind(&succeed1);
-
- Label succeed2;
- __ testb(tmp2, Immediate(kIsInternalizedMask));
- __ j(not_zero, &succeed2, Label::kNear);
- __ cmpb(tmp2, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
- __ j(not_equal, &miss, Label::kNear);
- __ bind(&succeed2);
-
- // Unique names are compared by identity.
- Label done;
- __ cmpq(left, right);
- // Make sure rax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(rax));
- __ j(not_equal, &done, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Move(rax, Smi::FromInt(EQUAL));
- __ bind(&done);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
- Label miss;
-
- bool equality = Token::IsEqualityOp(op_);
-
- // Registers containing left and right operands respectively.
- Register left = rdx;
- Register right = rax;
- Register tmp1 = rcx;
- Register tmp2 = rbx;
- Register tmp3 = rdi;
-
- // Check that both operands are heap objects.
- Condition cond = masm->CheckEitherSmi(left, right, tmp1);
- __ j(cond, &miss);
-
- // Check that both operands are strings. This leaves the instance
- // types loaded in tmp1 and tmp2.
- __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
- __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- __ movq(tmp3, tmp1);
- STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, tmp2);
- __ testb(tmp3, Immediate(kIsNotStringMask));
- __ j(not_zero, &miss);
-
- // Fast check for identical strings.
- Label not_same;
- __ cmpq(left, right);
- __ j(not_equal, &not_same, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
- __ Move(rax, Smi::FromInt(EQUAL));
- __ ret(0);
-
- // Handle not identical strings.
- __ bind(&not_same);
-
- // Check that both strings are internalized strings. If they are, we're done
- // because we already know they are not identical.
- if (equality) {
- Label do_compare;
- STATIC_ASSERT(kInternalizedTag != 0);
- __ and_(tmp1, tmp2);
- __ testb(tmp1, Immediate(kIsInternalizedMask));
- __ j(zero, &do_compare, Label::kNear);
- // Make sure rax is non-zero. At this point input operands are
- // guaranteed to be non-zero.
- ASSERT(right.is(rax));
- __ ret(0);
- __ bind(&do_compare);
- }
-
- // Check that both strings are sequential ASCII.
- Label runtime;
- __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
-
- // Compare flat ASCII strings. Returns when done.
- if (equality) {
- StringCompareStub::GenerateFlatAsciiStringEquals(
- masm, left, right, tmp1, tmp2);
- } else {
- StringCompareStub::GenerateCompareFlatAsciiStrings(
- masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
- }
-
- // Handle more complex cases in runtime.
- __ bind(&runtime);
- __ pop(tmp1); // Return address.
- __ push(left);
- __ push(right);
- __ push(tmp1);
- if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
- } else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
- }
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
- Label miss;
- Condition either_smi = masm->CheckEitherSmi(rdx, rax);
- __ j(either_smi, &miss, Label::kNear);
-
- __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, Label::kNear);
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &miss, Label::kNear);
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, Label::kNear);
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &miss, Label::kNear);
-
- ASSERT(GetCondition() == equal);
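- // Pointer identity decides the result: rax - rdx is zero, which equals
- // Smi::FromInt(EQUAL), exactly when both operands are the same object;
- // any non-zero difference signals inequality.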
- __ subq(rax, rdx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
- Label miss;
- Condition either_smi = masm->CheckEitherSmi(rdx, rax);
- __ j(either_smi, &miss, Label::kNear);
-
- __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ Cmp(rcx, known_map_);
- __ j(not_equal, &miss, Label::kNear);
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &miss, Label::kNear);
- __ Cmp(rbx, known_map_);
- __ j(not_equal, &miss, Label::kNear);
- __ testb(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kUseUserObjectComparison));
- __ j(not_zero, &miss, Label::kNear);
-
- __ subq(rax, rdx);
- __ ret(0);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- {
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdx);
- __ push(rax);
- __ push(rdx);
- __ push(rax);
- __ Push(Smi::FromInt(op_));
- __ CallExternalReference(miss, 3);
-
- // Compute the entry point of the rewritten stub.
- __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
- __ pop(rax);
- __ pop(rdx);
- }
-
- // Do a tail call to the rewritten stub.
- __ jmp(rdi);
-}
-
-
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<String> name,
- Register r0) {
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = 0; i < kInlinedProbes; i++) {
- // r0 points to properties hash.
- // Compute the masked index: (hash + i + i * i) & mask.
- Register index = r0;
- // Capacity is smi 2^n.
- __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
- __ decl(index);
- __ and_(index,
- Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
-
- Register entity_name = r0;
- // Having undefined at this place means the name is not contained.
- ASSERT_EQ(kSmiTagSize, 1);
- __ movq(entity_name, Operand(properties,
- index,
- times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
- __ j(equal, done);
-
- // Stop if found the property.
- __ Cmp(entity_name, Handle<String>(name));
- __ j(equal, miss);
-
- Label the_hole;
- // Check for the hole and skip.
- __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
- __ j(equal, &the_hole, Label::kNear);
-
- // Check if the entry name is not an internalized string.
- __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
- __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- Immediate(kIsInternalizedMask));
- __ j(zero, miss);
-
- __ bind(&the_hole);
- }
-
- StringDictionaryLookupStub stub(properties,
- r0,
- r0,
- StringDictionaryLookupStub::NEGATIVE_LOOKUP);
- __ Push(Handle<Object>(name));
- __ push(Immediate(name->Hash()));
- __ CallStub(&stub);
- __ testq(r0, r0);
- __ j(not_zero, miss);
- __ jmp(done);
-}
-
-
-// Probe the string dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found, leaving the
-// index into the dictionary in |r1|. Jump to the |miss| label
-// otherwise.
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
- ASSERT(!elements.is(r0));
- ASSERT(!elements.is(r1));
- ASSERT(!name.is(r0));
- ASSERT(!name.is(r1));
-
- __ AssertString(name);
-
- __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
- __ decl(r0);
-
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
- __ shrl(r1, Immediate(String::kHashShift));
- if (i > 0) {
- __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
- }
- __ and_(r1, r0);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
-
- // Check if the key is identical to the name.
- __ cmpq(name, Operand(elements, r1, times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ j(equal, done);
- }
-
- StringDictionaryLookupStub stub(elements,
- r0,
- r1,
- POSITIVE_LOOKUP);
- __ push(name);
- __ movl(r0, FieldOperand(name, String::kHashFieldOffset));
- __ shrl(r0, Immediate(String::kHashShift));
- __ push(r0);
- __ CallStub(&stub);
-
- __ testq(r0, r0);
- __ j(zero, miss);
- __ jmp(done);
-}
-
-
-void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
- // This stub overrides SometimesSetsUpAFrame() to return false. That means
- // we cannot call anything that could cause a GC from this stub.
- // Stack frame on entry:
- // rsp[0 * kPointerSize]: return address.
- // rsp[1 * kPointerSize]: key's hash.
- // rsp[2 * kPointerSize]: key.
- // Registers:
- // dictionary_: StringDictionary to probe.
- // result_: used as scratch.
- // index_: will hold an index of entry if lookup is successful.
- // might alias with result_.
- // Returns:
- // result_ is zero if lookup failed, non-zero otherwise.
-
- Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
-
- Register scratch = result_;
-
- __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
- __ decl(scratch);
- __ push(scratch);
-
- // If names of slots in range from 1 to kProbes - 1 for the hash value are
- // not equal to the name and kProbes-th slot is not used (its name is the
- // undefined value), it guarantees the hash table doesn't contain the
- // property. It's true even if some slots represent deleted properties
- // (their names are the hole value).
- for (int i = kInlinedProbes; i < kTotalProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ movq(scratch, Operand(rsp, 2 * kPointerSize));
- if (i > 0) {
- __ addl(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
- }
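- // rsp[0] holds the capacity mask (capacity - 1) pushed at the start of the stub.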
- __ and_(scratch, Operand(rsp, 0));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
-
- // Having undefined at this place means the name is not contained.
- __ movq(scratch, Operand(dictionary_,
- index_,
- times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
-
- __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
- __ j(equal, &not_in_dictionary);
-
- // Stop if found the property.
- __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
- __ j(equal, &in_dictionary);
-
- if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // If we hit a non internalized string key during negative lookup
- // we have to bailout as this key might be equal to the
- // key we are looking for.
-
- // Check if the entry name is not an internalized string.
- __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- Immediate(kIsInternalizedMask));
- __ j(zero, &maybe_in_dictionary);
- }
- }
-
- __ bind(&maybe_in_dictionary);
- // If we are doing negative lookup then probing failure should be
- // treated as a lookup success. For positive lookup probing failure
- // should be treated as lookup failure.
- if (mode_ == POSITIVE_LOOKUP) {
- __ movq(scratch, Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
- }
-
- __ bind(&in_dictionary);
- __ movq(scratch, Immediate(1));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-
- __ bind(&not_in_dictionary);
- __ movq(scratch, Immediate(0));
- __ Drop(1);
- __ ret(2 * kPointerSize);
-}
-
-
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
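-// REG(Name) expands to an aggregate initializer for the Register with that code.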
-
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
- // Used in CompileStoreGlobal.
- { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField and
- // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
- // GenerateStoreField calls the stub with two different permutations of
- // registers. This is the second.
- { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
- // StoreIC::GenerateNormal via GenerateDictionaryStore.
- { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
- // KeyedStoreIC::GenerateGeneric.
- { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET},
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
- { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
- { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
- // StoreArrayLiteralElementStub::Generate
- { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
- // FastNewClosureStub::Generate
- { REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated() {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
-}
-
-
-bool CodeStub::CanUseFPRegisters() {
- return true; // Always have SSE2 on x64.
-}
-
-
-// Takes the input in 3 registers: address_ value_ and object_. A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed. The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_to_incremental_noncompacting;
- Label skip_to_incremental_compacting;
-
- // The first two instructions are generated with labels so as to get the
- // offset fixed up correctly by the bind(Label*) call. We patch it back and
- // forth between a compare instruction (a nop in this position) and the
- // real branch when we start and stop incremental heap marking.
- // See RecordWriteStub::Patch for details.
- __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
- __ jmp(&skip_to_incremental_compacting, Label::kFar);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&skip_to_incremental_noncompacting);
- GenerateIncremental(masm, INCREMENTAL);
-
- __ bind(&skip_to_incremental_compacting);
- GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
- // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
- // Will be checked in IncrementalMarking::ActivateGeneratedStub.
- masm->set_byte_at(0, kTwoByteNopInstruction);
- masm->set_byte_at(2, kFiveByteNopInstruction);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
- regs_.Save(masm);
-
- if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
- Label dont_need_remembered_set;
-
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
- __ JumpIfNotInNewSpace(regs_.scratch0(),
- regs_.scratch0(),
- &dont_need_remembered_set);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- not_zero,
- &dont_need_remembered_set);
-
- // First notify the incremental marker if necessary, then update the
- // remembered set.
- CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
-
- __ bind(&dont_need_remembered_set);
- }
-
- CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
- regs_.Restore(masm);
- __ ret(0);
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
- regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
-#ifdef _WIN64
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
- Register address =
- arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
- ASSERT(!address.is(regs_.object()));
- ASSERT(!address.is(arg1));
- __ Move(address, regs_.address());
- __ Move(arg1, regs_.object());
- // TODO(gc) Can we just set address arg2 in the beginning?
- __ Move(arg2, address);
- __ LoadAddress(arg3, ExternalReference::isolate_address());
- int argument_count = 3;
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(argument_count);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
- regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode) {
- Label on_black;
- Label need_incremental;
- Label need_incremental_pop_object;
-
- __ movq(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ and_(regs_.scratch0(), regs_.object());
- __ movq(regs_.scratch1(),
- Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ subq(regs_.scratch1(), Immediate(1));
- __ movq(Operand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset),
- regs_.scratch1());
- __ j(negative, &need_incremental);
-
- // Let's look at the color of the object: If it is not black we don't have
- // to inform the incremental marker.
- __ JumpIfBlack(regs_.object(),
- regs_.scratch0(),
- regs_.scratch1(),
- &on_black,
- Label::kNear);
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&on_black);
-
- // Get the value from the slot.
- __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
-
- if (mode == INCREMENTAL_COMPACTION) {
- Label ensure_not_white;
-
- __ CheckPageFlag(regs_.scratch0(), // Contains value.
- regs_.scratch1(), // Scratch.
- MemoryChunk::kEvacuationCandidateMask,
- zero,
- &ensure_not_white,
- Label::kNear);
-
- __ CheckPageFlag(regs_.object(),
- regs_.scratch1(), // Scratch.
- MemoryChunk::kSkipEvacuationSlotsRecordingMask,
- zero,
- &need_incremental);
-
- __ bind(&ensure_not_white);
- }
-
- // We need an extra register for this, so we push the object register
- // temporarily.
- __ push(regs_.object());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object,
- Label::kNear);
- __ pop(regs_.object());
-
- regs_.Restore(masm);
- if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
- __ RememberedSetHelper(object_,
- address_,
- value_,
- save_fp_regs_mode_,
- MacroAssembler::kReturnAtEnd);
- } else {
- __ ret(0);
- }
-
- __ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
-
- __ bind(&need_incremental);
-
- // Fall through when we need to inform the incremental marker.
-}
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : element value to store
- // -- rbx : array literal
- // -- rdi : map of array literal
- // -- rcx : element index as smi
- // -- rdx : array literal index in function
- // -- rsp[0] : return address
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- __ CheckFastElements(rdi, &double_elements);
-
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
- __ JumpIfSmi(rax, &smi_element);
- __ CheckFastSmiElements(rdi, &fast_elements);
-
- // Storing into the array literal requires an elements transition. Call into
- // the runtime.
-
- __ bind(&slow_elements);
- __ pop(rdi); // Pop return address and remember to put back later for tail
- // call.
- __ push(rbx);
- __ push(rcx);
- __ push(rax);
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ push(rdx);
- __ push(rdi); // Push the return address back so that the tail call returns
- // to the right place.
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ SmiToInteger32(kScratchRegister, rcx);
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
- FixedArrayBase::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
- // Update the write barrier for the array store.
- __ RecordWrite(rbx, rcx, rax,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
- // FAST_*_ELEMENTS, and value is Smi.
- __ bind(&smi_element);
- __ SmiToInteger32(kScratchRegister, rcx);
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
- FixedArrayBase::kHeaderSize), rax);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
-
- __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
- __ SmiToInteger32(r11, rcx);
- __ StoreNumberToDoubleElements(rax,
- r9,
- r11,
- xmm0,
- &slow_elements);
- __ ret(0);
-}
-
-
-void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- ASSERT(!Serializer::enabled());
- CEntryStub ces(1, kSaveFPRegs);
- __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- int parameter_count_offset =
- StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
- __ movq(rbx, MemOperand(rbp, parameter_count_offset));
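- // rbx now holds the caller's stack parameter count recorded in the frame.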
- masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
- __ pop(rcx);
- __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size,
- extra_expression_stack_count_ * kPointerSize));
- __ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
-}
-
-
-void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
- if (entry_hook_ != NULL) {
- ProfileEntryHookStub stub;
- masm->CallStub(&stub);
- }
-}
-
-
-void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
- // Save volatile registers.
- // Live registers at this point are the same as at the start of any
- // JS function:
- // o rdi: the JS function object being called (i.e. ourselves)
- // o rsi: our context
- // o rbp: our caller's frame pointer
- // o rsp: stack pointer (pointing to return address)
- // o rcx: rcx is zero for method calls and non-zero for function calls.
-#ifdef _WIN64
- const int kNumSavedRegisters = 1;
-
- __ push(rcx);
-#else
- const int kNumSavedRegisters = 3;
-
- __ push(rcx);
- __ push(rdi);
- __ push(rsi);
-#endif
-
- // Calculate the original stack pointer and store it in the second arg.
-#ifdef _WIN64
- __ lea(rdx, Operand(rsp, kNumSavedRegisters * kPointerSize));
-#else
- __ lea(rsi, Operand(rsp, kNumSavedRegisters * kPointerSize));
-#endif
-
- // Calculate the function address and store it in the first arg.
-#ifdef _WIN64
- __ movq(rcx, Operand(rdx, 0));
- __ subq(rcx, Immediate(Assembler::kShortCallInstructionLength));
-#else
- __ movq(rdi, Operand(rsi, 0));
- __ subq(rdi, Immediate(Assembler::kShortCallInstructionLength));
-#endif
-
- // Call the entry hook function.
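- // Load the address of the entry_hook_ variable, then dereference it to get
- // the hook function pointer.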
- __ movq(rax, &entry_hook_, RelocInfo::NONE64);
- __ movq(rax, Operand(rax, 0));
-
- AllowExternalCallThatCantCauseGC scope(masm);
-
- const int kArgumentCount = 2;
- __ PrepareCallCFunction(kArgumentCount);
- __ CallCFunction(rax, kArgumentCount);
-
- // Restore volatile regs.
-#ifdef _WIN64
- __ pop(rcx);
-#else
- __ pop(rsi);
- __ pop(rdi);
- __ pop(rcx);
-#endif
-
- __ Ret();
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.h b/src/3rdparty/v8/src/x64/code-stubs-x64.h
deleted file mode 100644
index 675e95b..0000000
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.h
+++ /dev/null
@@ -1,623 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_CODE_STUBS_X64_H_
-#define V8_X64_CODE_STUBS_X64_H_
-
-#include "ic-inl.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Compute a transcendental math function natively, or call the
-// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public PlatformCodeStub {
- public:
- enum ArgumentType {
- TAGGED = 0,
- UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
- };
-
- explicit TranscendentalCacheStub(TranscendentalCache::Type type,
- ArgumentType argument_type)
- : type_(type), argument_type_(argument_type) {}
- void Generate(MacroAssembler* masm);
- static void GenerateOperation(MacroAssembler* masm,
- TranscendentalCache::Type type);
- private:
- TranscendentalCache::Type type_;
- ArgumentType argument_type_;
-
- Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_ | argument_type_; }
- Runtime::FunctionId RuntimeFunction();
-};
-
-
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
- explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
-
- void Generate(MacroAssembler* masm);
-
- virtual bool IsPregenerated() { return true; }
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- SaveFPRegsMode save_doubles_;
-
- Major MajorKey() { return StoreBufferOverflow; }
- int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
-class UnaryOpStub: public PlatformCodeStub {
- public:
- UnaryOpStub(Token::Value op,
- UnaryOverwriteMode mode,
- UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
- : op_(op),
- mode_(mode),
- operand_type_(operand_type) {
- }
-
- private:
- Token::Value op_;
- UnaryOverwriteMode mode_;
-
- // Operand type information determined at runtime.
- UnaryOpIC::TypeInfo operand_type_;
-
- virtual void PrintName(StringStream* stream);
-
- class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
- class OpBits: public BitField<Token::Value, 1, 7> {};
- class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
- Major MajorKey() { return UnaryOp; }
- int MinorKey() {
- return ModeBits::encode(mode_)
- | OpBits::encode(op_)
- | OperandTypeInfoBits::encode(operand_type_);
- }
-
- // Note: A lot of the helper functions below will vanish when we use virtual
- // functions instead of switches more often.
- void Generate(MacroAssembler* masm);
-
- void GenerateTypeTransition(MacroAssembler* masm);
-
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateSmiStubSub(MacroAssembler* masm);
- void GenerateSmiStubBitNot(MacroAssembler* masm);
- void GenerateSmiCodeSub(MacroAssembler* masm,
- Label* non_smi,
- Label* slow,
- Label::Distance non_smi_near = Label::kFar,
- Label::Distance slow_near = Label::kFar);
- void GenerateSmiCodeBitNot(MacroAssembler* masm,
- Label* non_smi,
- Label::Distance non_smi_near);
-
- void GenerateNumberStub(MacroAssembler* masm);
- void GenerateNumberStubSub(MacroAssembler* masm);
- void GenerateNumberStubBitNot(MacroAssembler* masm);
- void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateGenericStubSub(MacroAssembler* masm);
- void GenerateGenericStubBitNot(MacroAssembler* masm);
- void GenerateGenericCodeFallback(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return UnaryOpIC::ToState(operand_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_unary_op_type(operand_type_);
- }
-};
-
-
-class StringHelper : public AllStatic {
- public:
- // Generate code for copying characters using a simple loop. This should only
- // be used in places where the number of characters is small and the
- // additional setup and checking in GenerateCopyCharactersREP adds too much
- // overhead. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- bool ascii);
-
- // Generate code for copying characters using the rep movs instruction.
- // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharactersREP(MacroAssembler* masm,
- Register dest, // Must be rdi.
- Register src, // Must be rsi.
- Register count, // Must be rcx.
- bool ascii);
-
-
- // Probe the string table for a two-character string. If the string is
- // not found by probing, a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the string table. If the
- // string is found the code falls through with the string in register rax.
- static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
- Register c1,
- Register c2,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* not_found);
-
- // Generate string hash.
- static void GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character,
- Register scratch);
- static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash,
- Register scratch);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
- NO_STRING_ADD_FLAGS = 0,
- // Omit left string check in stub (left is definitely a string).
- NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
- // Omit right string check in stub (right is definitely a string).
- NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
- // Omit both string checks in stub.
- NO_STRING_CHECK_IN_STUB =
- NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
-};
-
-
-class StringAddStub: public PlatformCodeStub {
- public:
- explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
-
- private:
- Major MajorKey() { return StringAdd; }
- int MinorKey() { return flags_; }
-
- void Generate(MacroAssembler* masm);
-
- void GenerateConvertArgument(MacroAssembler* masm,
- int stack_offset,
- Register arg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* slow);
-
- const StringAddFlags flags_;
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- StringCompareStub() {}
-
- // Compares two flat ASCII strings and returns result in rax.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compares two flat ASCII strings for equality and returns result
- // in rax.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2);
-
- private:
- virtual Major MajorKey() { return StringCompare; }
- virtual int MinorKey() { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(
- MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch,
- Label* chars_not_equal,
- Label::Distance near_jump = Label::kFar);
-};
-
-
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache, the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache, the code jumps to
- // the label not_found; only the content of the object register is left unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- bool object_is_smi,
- Label* not_found);
-
- private:
- static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask);
-
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-class StringDictionaryLookupStub: public PlatformCodeStub {
- public:
- enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
- StringDictionaryLookupStub(Register dictionary,
- Register result,
- Register index,
- LookupMode mode)
- : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
-
- void Generate(MacroAssembler* masm);
-
- static void GenerateNegativeLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register properties,
- Handle<String> name,
- Register r0);
-
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
- static const int kInlinedProbes = 4;
- static const int kTotalProbes = 20;
-
- static const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
-
- static const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
-
- Major MajorKey() { return StringDictionaryLookup; }
-
- int MinorKey() {
- return DictionaryBits::encode(dictionary_.code()) |
- ResultBits::encode(result_.code()) |
- IndexBits::encode(index_.code()) |
- LookupModeBits::encode(mode_);
- }
-
- class DictionaryBits: public BitField<int, 0, 4> {};
- class ResultBits: public BitField<int, 4, 4> {};
- class IndexBits: public BitField<int, 8, 4> {};
- class LookupModeBits: public BitField<LookupMode, 12, 1> {};
-
- Register dictionary_;
- Register result_;
- Register index_;
- LookupMode mode_;
-};
-
-
-class RecordWriteStub: public PlatformCodeStub {
- public:
- RecordWriteStub(Register object,
- Register value,
- Register address,
- RememberedSetAction remembered_set_action,
- SaveFPRegsMode fp_mode)
- : object_(object),
- value_(value),
- address_(address),
- remembered_set_action_(remembered_set_action),
- save_fp_regs_mode_(fp_mode),
- regs_(object, // An input reg.
- address, // An input reg.
- value) { // One scratch reg.
- }
-
- enum Mode {
- STORE_BUFFER_ONLY,
- INCREMENTAL,
- INCREMENTAL_COMPACTION
- };
-
- virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
- virtual bool SometimesSetsUpAFrame() { return false; }
-
- static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
- static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
-
- static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
- static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
-
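- // Recover the current mode by inspecting the bytes that Patch() toggles
- // between the nop and jump encodings above.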
- static Mode GetMode(Code* stub) {
- byte first_instruction = stub->instruction_start()[0];
- byte second_instruction = stub->instruction_start()[2];
-
- if (first_instruction == kTwoByteJumpInstruction) {
- return INCREMENTAL;
- }
-
- ASSERT(first_instruction == kTwoByteNopInstruction);
-
- if (second_instruction == kFiveByteJumpInstruction) {
- return INCREMENTAL_COMPACTION;
- }
-
- ASSERT(second_instruction == kFiveByteNopInstruction);
-
- return STORE_BUFFER_ONLY;
- }
-
- static void Patch(Code* stub, Mode mode) {
- switch (mode) {
- case STORE_BUFFER_ONLY:
- ASSERT(GetMode(stub) == INCREMENTAL ||
- GetMode(stub) == INCREMENTAL_COMPACTION);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteNopInstruction;
- break;
- case INCREMENTAL:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteJumpInstruction;
- break;
- case INCREMENTAL_COMPACTION:
- ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- stub->instruction_start()[0] = kTwoByteNopInstruction;
- stub->instruction_start()[2] = kFiveByteJumpInstruction;
- break;
- }
- ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 7);
- }
-
- private:
- // This is a helper class for freeing up 3 scratch registers, where the third
- // is always rcx (needed for shift operations). The input is two registers
- // that must be preserved and one scratch register provided by the caller.
- class RegisterAllocation {
- public:
- RegisterAllocation(Register object,
- Register address,
- Register scratch0)
- : object_orig_(object),
- address_orig_(address),
- scratch0_orig_(scratch0),
- object_(object),
- address_(address),
- scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
- if (scratch0.is(rcx)) {
- scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
- }
- if (object.is(rcx)) {
- object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_);
- }
- if (address.is(rcx)) {
- address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
- }
- ASSERT(!AreAliased(scratch0_, object_, address_, rcx));
- }
-
- void Save(MacroAssembler* masm) {
- ASSERT(!address_orig_.is(object_));
- ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
- ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
- ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
- // We don't have to save scratch0_orig_ because it was given to us as
- // a scratch register. But if we had to switch to a different reg then
- // we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
- if (!rcx.is(scratch0_orig_) &&
- !rcx.is(object_orig_) &&
- !rcx.is(address_orig_)) {
- masm->push(rcx);
- }
- masm->push(scratch1_);
- if (!address_.is(address_orig_)) {
- masm->push(address_);
- masm->movq(address_, address_orig_);
- }
- if (!object_.is(object_orig_)) {
- masm->push(object_);
- masm->movq(object_, object_orig_);
- }
- }
-
- void Restore(MacroAssembler* masm) {
- // These will have been preserved the entire time, so we just need to move
- // them back. Only in one case is the orig_ reg different from the plain
- // one, since only one of them can alias with rcx.
- if (!object_.is(object_orig_)) {
- masm->movq(object_orig_, object_);
- masm->pop(object_);
- }
- if (!address_.is(address_orig_)) {
- masm->movq(address_orig_, address_);
- masm->pop(address_);
- }
- masm->pop(scratch1_);
- if (!rcx.is(scratch0_orig_) &&
- !rcx.is(object_orig_) &&
- !rcx.is(address_orig_)) {
- masm->pop(rcx);
- }
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
- }
-
- // If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved.
-
- // The three scratch registers (incl. rcx) will be restored by other means
- // so we don't bother pushing them here. Rbx, rbp and r12-r15 are
- // callee-saved and don't need to be preserved.
- void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx);
- }
-
- inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
- SaveFPRegsMode mode) {
- masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx);
- }
-
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
-
- private:
- Register object_orig_;
- Register address_orig_;
- Register scratch0_orig_;
- Register object_;
- Register address_;
- Register scratch0_;
- Register scratch1_;
- // Third scratch register is always rcx.
-
- Register GetRegThatIsNotRcxOr(Register r1,
- Register r2,
- Register r3) {
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
- if (candidate.is(rcx)) continue;
- if (candidate.is(r1)) continue;
- if (candidate.is(r2)) continue;
- if (candidate.is(r3)) continue;
- return candidate;
- }
- UNREACHABLE();
- return no_reg;
- }
- friend class RecordWriteStub;
- };
-
- enum OnNoNeedToInformIncrementalMarker {
- kReturnOnNoNeedToInformIncrementalMarker,
- kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
- void CheckNeedsToInformIncrementalMarker(
- MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need,
- Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
- }
-
- void Activate(Code* code) {
- code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
- }
-
- class ObjectBits: public BitField<int, 0, 4> {};
- class ValueBits: public BitField<int, 4, 4> {};
- class AddressBits: public BitField<int, 8, 4> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
-
- Register object_;
- Register value_;
- Register address_;
- RememberedSetAction remembered_set_action_;
- SaveFPRegsMode save_fp_regs_mode_;
- Label slow_;
- RegisterAllocation regs_;
-};
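
The MinorKey above packs the stub's three register codes and its two flag enums into one integer with BitField templates. A minimal, self-contained sketch of that shift/mask encoding, using a simplified stand-in for V8's BitField (field widths mirror the declarations above; the helper itself is illustrative, not V8's):

#include <cassert>
#include <cstdint>

// Simplified stand-in for V8's BitField<T, shift, size> template.
template <typename T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t key) { return static_cast<T>((key & kMask) >> shift); }
};

using ObjectBits  = BitField<int, 0, 4>;   // register code of the object
using ValueBits   = BitField<int, 4, 4>;   // register code of the value
using AddressBits = BitField<int, 8, 4>;   // register code of the slot address

int main() {
  // Pack three 4-bit register codes into one minor key, then decode them.
  uint32_t key = ObjectBits::encode(3) | ValueBits::encode(7) | AddressBits::encode(12);
  assert(ObjectBits::decode(key) == 3);
  assert(ValueBits::decode(key) == 7);
  assert(AddressBits::decode(key) == 12);
  return 0;
}
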
-
-
-} } // namespace v8::internal
-
-#endif // V8_X64_CODE_STUBS_X64_H_
diff --git a/src/3rdparty/v8/src/x64/codegen-x64.cc b/src/3rdparty/v8/src/x64/codegen-x64.cc
deleted file mode 100644
index fa8b44a..0000000
--- a/src/3rdparty/v8/src/x64/codegen-x64.cc
+++ /dev/null
@@ -1,785 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Platform-specific RuntimeCallHelper functions.
-
-void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
- masm->EnterFrame(StackFrame::INTERNAL);
- ASSERT(!masm->has_frame());
- masm->set_has_frame(true);
-}
-
-
-void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
- masm->LeaveFrame(StackFrame::INTERNAL);
- ASSERT(masm->has_frame());
- masm->set_has_frame(false);
-}
-
-
-#define __ masm.
-
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) {
- // Fallback to library function if function cannot be created.
- switch (type) {
- case TranscendentalCache::SIN: return &sin;
- case TranscendentalCache::COS: return &cos;
- case TranscendentalCache::TAN: return &tan;
- case TranscendentalCache::LOG: return &log;
- default: UNIMPLEMENTED();
- }
- }
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // xmm0: raw double input.
- // Move double input into registers.
- __ push(rbx);
- __ push(rdi);
- __ movq(rbx, xmm0);
- __ push(rbx);
- __ fld_d(Operand(rsp, 0));
- TranscendentalCacheStub::GenerateOperation(&masm, type);
- // The return value is expected to be in xmm0.
- __ fstp_d(Operand(rsp, 0));
- __ pop(rbx);
- __ movq(xmm0, rbx);
- __ pop(rdi);
- __ pop(rbx);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // xmm0: raw double input.
- XMMRegister input = xmm0;
- XMMRegister result = xmm1;
- __ push(rax);
- __ push(rbx);
-
- MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
-
- __ pop(rbx);
- __ pop(rax);
- __ movsd(xmm0, result);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-UnaryMathFunction CreateSqrtFunction() {
- size_t actual_size;
- // Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) return &sqrt;
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // xmm0: raw double input.
- // Move double input into registers.
- __ sqrtsd(xmm0, xmm0);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- ASSERT(!RelocInfo::RequiresRelocation(desc));
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
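
CreateTranscendentalFunction, CreateExpFunction, and CreateSqrtFunction all follow the same pattern: allocate an executable buffer, emit machine code into it, flush and protect it, then hand back a plain C function pointer. A hedged, POSIX-only sketch of that pattern outside V8, with mmap standing in for OS::Allocate and hand-written bytes for "sqrtsd xmm0, xmm0; ret" (the System V AMD64 ABI passes and returns the double in xmm0, so no prologue is needed):

#include <sys/mman.h>
#include <cstdio>
#include <cstring>

typedef double (*UnaryMathFunction)(double);

int main() {
  const unsigned char code[] = {0xF2, 0x0F, 0x51, 0xC0,  // sqrtsd xmm0, xmm0
                                0xC3};                   // ret
  void* buffer = mmap(nullptr, sizeof(code), PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buffer == MAP_FAILED) return 1;
  memcpy(buffer, code, sizeof(code));
  UnaryMathFunction fast_sqrt = reinterpret_cast<UnaryMathFunction>(buffer);
  printf("%f\n", fast_sqrt(2.0));  // ~1.414214
  munmap(buffer, sizeof(code));
  return 0;
}
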
-
-
-#ifdef _WIN64
-typedef double (*ModuloFunction)(double, double);
-// Define custom fmod implementation.
-ModuloFunction CreateModuloFunction() {
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
- &actual_size,
- true));
- CHECK(buffer);
- Assembler masm(NULL, buffer, static_cast<int>(actual_size));
- // Generated code is put into a fixed, unmovable buffer, and not into
- // the V8 heap. We can't, and don't, refer to any relocatable addresses
- // (e.g. the JavaScript NaN object).
-
- // Windows 64 ABI passes double arguments in xmm0, xmm1 and
- // returns result in xmm0.
- // Argument backing space is allocated on the stack above
- // the return address.
-
- // Compute x mod y.
- // Load y and x (use argument backing store as temporary storage).
- __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
- __ movsd(Operand(rsp, kPointerSize), xmm0);
- __ fld_d(Operand(rsp, kPointerSize * 2));
- __ fld_d(Operand(rsp, kPointerSize));
-
- // Clear exception flags before operation.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testb(rax, Immediate(5));
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
-
- Label valid_result;
- Label return_result;
- // If Invalid Operand or Zero Division exceptions are set,
- // return NaN.
- __ testb(rax, Immediate(5));
- __ j(zero, &valid_result);
- __ fstp(0); // Drop result in st(0).
- int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
- __ movq(rcx, kNaNValue, RelocInfo::NONE64);
- __ movq(Operand(rsp, kPointerSize), rcx);
- __ movsd(xmm0, Operand(rsp, kPointerSize));
- __ jmp(&return_result);
-
- // If result is valid, return that.
- __ bind(&valid_result);
- __ fstp_d(Operand(rsp, kPointerSize));
- __ movsd(xmm0, Operand(rsp, kPointerSize));
-
- // Clean up FPU stack and exceptions and return xmm0
- __ bind(&return_result);
- __ fstp(0); // Unload y.
-
- Label clear_exceptions;
- __ testb(rax, Immediate(0x3f /* Any Exception */));
- __ j(not_zero, &clear_exceptions);
- __ ret(0);
- __ bind(&clear_exceptions);
- __ fnclex();
- __ ret(0);
-
- CodeDesc desc;
- masm.GetCode(&desc);
- OS::ProtectCode(buffer, actual_size);
- // Call the function from C++ through this pointer.
- return FUNCTION_CAST<ModuloFunction>(buffer);
-}
-
-#endif
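
The stub above reproduces fmod with x87 instructions: fprem only partially reduces the operand when the exponents are far apart, so the code loops while the C2 status bit stays set, and the invalid-operation/zero-divide flags are turned into a quiet NaN result. A behavior-level sketch of the same contract in portable C++ (this is what the generated code is expected to compute, not how it computes it):

#include <cmath>
#include <cstdio>

int main() {
  printf("%f\n", std::fmod(5.5, 2.0));    // 1.5
  printf("%f\n", std::fmod(-5.5, 2.0));   // -1.5 (sign follows the dividend)
  printf("%f\n", std::fmod(5.5, 0.0));    // nan, matching the stub's 0x7ff8... pattern
  return 0;
}
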
-
-#undef __
-
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm, AllocationSiteMode mode,
- Label* allocation_site_info_found) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rbx : target map
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- if (mode == TRACK_ALLOCATION_SITE) {
- ASSERT(allocation_site_info_found != NULL);
- __ TestJSArrayForAllocationSiteInfo(rdx, rdi);
- __ j(equal, allocation_site_info_found);
- }
-
- // Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
- __ RecordWriteField(rdx,
- HeapObject::kMapOffset,
- rbx,
- rdi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rbx : target map
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- // The fail label is taken if allocation-site tracking bails out or if
- // allocating a new backing store fails.
- Label allocated, new_backing_store, only_change_map, done;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(rdx, rdi);
- __ j(equal, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
- __ j(equal, &only_change_map);
-
- // Check backing store for COW-ness. For COW arrays we have to
- // allocate a new backing store.
- __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
- __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &new_backing_store);
- // Check if the backing store is in new-space. If not, we need to allocate
- // a new one since the old one is in pointer-space.
- // If in new space, we can reuse the old backing store because it is
- // the same size.
- __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
-
- __ movq(r14, r8); // Destination array equals source array.
-
- // r8 : source FixedArray
- // r9 : elements array length
- // r14: destination FixedDoubleArray
- // Set backing store's map
- __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
-
- __ bind(&allocated);
- // Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
- __ RecordWriteField(rdx,
- HeapObject::kMapOffset,
- rbx,
- rdi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Convert smis to doubles and holes to hole NaNs. The Array's length
- // remains unchanged.
- STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
- STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
-
- Label loop, entry, convert_hole;
- __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
- // r15: the-hole NaN
- __ jmp(&entry);
-
- // Allocate new backing store.
- __ bind(&new_backing_store);
- __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
- __ AllocateInNewSpace(rdi, r14, r11, r15, fail, TAG_OBJECT);
- // Set backing store's map
- __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
- // Set receiver's backing store.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
- __ movq(r11, r14);
- __ RecordWriteField(rdx,
- JSObject::kElementsOffset,
- r11,
- r15,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Set backing store's length.
- __ Integer32ToSmi(r11, r9);
- __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
- __ jmp(&allocated);
-
- __ bind(&only_change_map);
- // Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
- __ RecordWriteField(rdx,
- HeapObject::kMapOffset,
- rbx,
- rdi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&done);
-
- // Conversion loop.
- __ bind(&loop);
- __ movq(rbx,
- FieldOperand(r8, r9, times_8, FixedArray::kHeaderSize));
- // r9 : current element's index
- // rbx: current element (smi-tagged)
- __ JumpIfNotSmi(rbx, &convert_hole);
- __ SmiToInteger32(rbx, rbx);
- __ cvtlsi2sd(xmm0, rbx);
- __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
- xmm0);
- __ jmp(&entry);
- __ bind(&convert_hole);
-
- if (FLAG_debug_code) {
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ Assert(equal, "object found in smi-only array");
- }
-
- __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
- __ bind(&entry);
- __ decq(r9);
- __ j(not_sign, &loop);
-
- __ bind(&done);
-}
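
The loop above untags each smi, converts it to a double, and writes the hole NaN bit pattern wherever the source holds the-hole instead of a smi. A standalone sketch of the same element-wise conversion, assuming the x64 smi layout of the period (payload in the upper 32 bits, so untagging is a shift right by 32); the hole sentinel below is made up for illustration, not V8's kHoleNanInt64, and holes are flagged with a separate array instead of a smi check:

#include <cstdint>
#include <cstring>
#include <cstdio>

const int64_t kSmiShift = 32;
const uint64_t kHoleSentinel = 0x7FF8DEADDEADDEADull;  // illustrative only

void SmiArrayToDouble(const int64_t* smis, const bool* is_hole,
                      uint64_t* out_bits, int length) {
  for (int i = 0; i < length; ++i) {
    if (is_hole[i]) {
      out_bits[i] = kHoleSentinel;  // hole -> hole NaN bit pattern
    } else {
      double d = static_cast<double>(smis[i] >> kSmiShift);  // untag, then convert
      memcpy(&out_bits[i], &d, sizeof(d));
    }
  }
}

int main() {
  int64_t smis[2] = {int64_t(7) << kSmiShift, 0};
  bool holes[2] = {false, true};
  uint64_t out[2];
  SmiArrayToDouble(smis, holes, out, 2);
  printf("%016llx %016llx\n", (unsigned long long)out[0], (unsigned long long)out[1]);
  return 0;
}
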
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rbx : target map
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label loop, entry, convert_hole, gc_required, only_change_map;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ TestJSArrayForAllocationSiteInfo(rdx, rdi);
- __ j(equal, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
- __ j(equal, &only_change_map);
-
- __ push(rax);
-
- __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
- // r8 : source FixedDoubleArray
- // r9 : number of elements
- __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
- __ AllocateInNewSpace(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
- // r11: destination FixedArray
- __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
- __ Integer32ToSmi(r14, r9);
- __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
-
- // Prepare for conversion loop.
- __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
- __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
- // rsi: the-hole NaN
- // rdi: pointer to the-hole
- __ jmp(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ pop(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ jmp(fail);
-
- // Box doubles into heap numbers.
- __ bind(&loop);
- __ movq(r14, FieldOperand(r8,
- r9,
- times_pointer_size,
- FixedDoubleArray::kHeaderSize));
- // r9 : current element's index
- // r14: current element
- __ cmpq(r14, rsi);
- __ j(equal, &convert_hole);
-
- // Non-hole double, copy value into a heap number.
- __ AllocateHeapNumber(rax, r15, &gc_required);
- // rax: new heap number
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
- __ movq(FieldOperand(r11,
- r9,
- times_pointer_size,
- FixedArray::kHeaderSize),
- rax);
- __ movq(r15, r9);
- __ RecordWriteArray(r11,
- rax,
- r15,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&entry, Label::kNear);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ movq(FieldOperand(r11,
- r9,
- times_pointer_size,
- FixedArray::kHeaderSize),
- rdi);
-
- __ bind(&entry);
- __ decq(r9);
- __ j(not_sign, &loop);
-
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
- __ RecordWriteField(rdx,
- JSObject::kElementsOffset,
- r11,
- r15,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(rax);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- __ bind(&only_change_map);
- // Set transitioned map.
- __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
- __ RecordWriteField(rdx,
- HeapObject::kMapOffset,
- rbx,
- rdi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime) {
- // Fetch the instance type of the receiver into result register.
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ testb(result, Immediate(kIsIndirectStringMask));
- __ j(zero, &check_sequential, Label::kNear);
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ testb(result, Immediate(kSlicedNotConsMask));
- __ j(zero, &cons_string, Label::kNear);
-
- // Handle slices.
- Label indirect_string_loaded;
- __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ addq(index, result);
- __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded, Label::kNear);
-
- // Handle cons strings.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
- Heap::kempty_stringRootIndex);
- __ j(not_equal, call_runtime);
- __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
- // Distinguish sequential and external strings. Only these two string
- // representations can reach here (slices and flat cons strings have been
- // reduced to the underlying sequential or external string).
- Label seq_string;
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ testb(result, Immediate(kStringRepresentationMask));
- __ j(zero, &seq_string, Label::kNear);
-
- // Handle external strings.
- Label ascii_external, done;
- if (FLAG_debug_code) {
- // Assert that we do not have a cons or slice (indirect strings) here.
- // Sequential strings have already been ruled out.
- __ testb(result, Immediate(kIsIndirectStringMask));
- __ Assert(zero, "external string expected, but not found");
- }
- // Rule out short external strings.
- STATIC_CHECK(kShortExternalStringTag != 0);
- __ testb(result, Immediate(kShortExternalStringTag));
- __ j(not_zero, call_runtime);
- // Check encoding.
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ testb(result, Immediate(kStringEncodingMask));
- __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
- __ j(not_equal, &ascii_external, Label::kNear);
- // Two-byte string.
- __ movzxwl(result, Operand(result, index, times_2, 0));
- __ jmp(&done, Label::kNear);
- __ bind(&ascii_external);
- // ASCII string.
- __ movzxbl(result, Operand(result, index, times_1, 0));
- __ jmp(&done, Label::kNear);
-
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii;
- __ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testb(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii, Label::kNear);
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movzxwl(result, FieldOperand(string,
- index,
- times_2,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii);
- __ movzxbl(result, FieldOperand(string,
- index,
- times_1,
- SeqOneByteString::kHeaderSize));
- __ bind(&done);
-}
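
StringCharLoadGenerator::Generate dispatches on the string's instance-type bits: indirect strings (slices, and cons strings whose second half is empty) are first resolved to their underlying string, then the character is loaded from sequential or external storage, one-byte or two-byte. A simplified C++ model of that control flow, using a hypothetical struct and explicit enums in place of V8's tag masks (the runtime bailouts for short external strings and non-flat cons strings are omitted):

#include <cstdint>
#include <cstdio>

enum Representation { kSeq, kCons, kSliced, kExternal };
enum Encoding { kOneByte, kTwoByte };

// Hypothetical stand-in for a heap string; only the fields the dispatch needs.
struct SimpleString {
  Representation rep;
  Encoding encoding;
  const void* data;      // payload of a sequential or external string
  SimpleString* parent;  // cons first part / slice parent
  int slice_offset;      // sliced strings only
};

uint16_t CharAt(SimpleString* s, int index) {
  // Resolve indirect strings first, exactly like the generated code.
  if (s->rep == kSliced) {
    index += s->slice_offset;
    s = s->parent;
  } else if (s->rep == kCons) {
    s = s->parent;  // assumes the second part is the empty string
  }
  // Then load by encoding from sequential or external storage.
  if (s->encoding == kTwoByte)
    return static_cast<const uint16_t*>(s->data)[index];
  return static_cast<const uint8_t*>(s->data)[index];
}

int main() {
  SimpleString base = {kSeq, kOneByte, "hello world", nullptr, 0};
  SimpleString slice = {kSliced, kOneByte, nullptr, &base, 6};
  printf("%c\n", static_cast<char>(CharAt(&slice, 0)));  // 'w'
  return 0;
}
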
-
-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ Check(masm->CheckSmi(index), "Non-smi index");
- __ Check(masm->CheckSmi(value), "Non-smi value");
-
- __ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, "Index is too large");
-
- __ SmiCompare(index, Smi::FromInt(0));
- __ Check(greater_equal, "Index is negative");
-
- __ push(value);
- __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, "Unexpected string type");
- __ pop(value);
- }
-
- __ SmiToInteger32(value, value);
- __ SmiToInteger32(index, index);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
- } else {
- __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
- value);
- }
-}
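
SeqStringSetCharGenerator writes an untagged character code into the payload of a sequential string, choosing a byte or word store based on the encoding. A tiny C++ analogue over raw buffers (the smi untagging and the debug-mode type checks from above are omitted):

#include <cstdint>
#include <cstdio>

enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };

void SeqStringSetChar(Encoding encoding, void* payload, int index, uint16_t value) {
  if (encoding == ONE_BYTE_ENCODING) {
    static_cast<uint8_t*>(payload)[index] = static_cast<uint8_t>(value);  // movb
  } else {
    static_cast<uint16_t*>(payload)[index] = value;                       // movw
  }
}

int main() {
  char buf[6] = "hello";
  SeqStringSetChar(ONE_BYTE_ENCODING, buf, 0, 'y');
  printf("%s\n", buf);  // "yello"
  return 0;
}
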
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2) {
- ASSERT(!input.is(result));
- ASSERT(!input.is(double_scratch));
- ASSERT(!result.is(double_scratch));
- ASSERT(!temp1.is(temp2));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
-
- __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
- __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
- __ xorpd(result, result);
- __ ucomisd(double_scratch, input);
- __ j(above_equal, &done);
- __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
- __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
- __ j(above_equal, &done);
- __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
- __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
- __ mulsd(double_scratch, input);
- __ addsd(double_scratch, result);
- __ movq(temp2, double_scratch);
- __ subsd(double_scratch, result);
- __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
- __ lea(temp1, Operand(temp2, 0x1ff800));
- __ and_(temp2, Immediate(0x7ff));
- __ shr(temp1, Immediate(11));
- __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
- __ movq(kScratchRegister, ExternalReference::math_exp_log_table());
- __ shl(temp1, Immediate(52));
- __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
- __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
- __ subsd(double_scratch, input);
- __ movsd(input, double_scratch);
- __ subsd(result, double_scratch);
- __ mulsd(input, double_scratch);
- __ mulsd(result, input);
- __ movq(input, temp1);
- __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
- __ subsd(result, double_scratch);
- __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
- __ mulsd(result, input);
-
- __ bind(&done);
-}
-
-#undef __
-
-
-static const int kNoCodeAgeSequenceLength = 6;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- static bool initialized = false;
- static byte sequence[kNoCodeAgeSequenceLength];
- *length = kNoCodeAgeSequenceLength;
- if (!initialized) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found both in
- // FUNCTION and OPTIMIZED_FUNCTION code:
- CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->push(rbp);
- patcher.masm()->movq(rbp, rsp);
- patcher.masm()->push(rsi);
- patcher.masm()->push(rdi);
- initialized = true;
- }
- return sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = (!memcmp(sequence, young_sequence, young_length));
- ASSERT(result || *sequence == kCallOpcode);
- return result;
-}
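
GetNoCodeAgeSequence emits the standard six-byte prologue (push rbp; movq rbp, rsp; push rsi; push rdi) once into a static buffer, and IsYoungSequence simply memcmp's a code object's first bytes against it; aged code instead starts with a call to an age stub followed by a nop. A minimal sketch with the prologue bytes written out by hand instead of going through a CodePatcher (the byte values shown are the usual x86-64 encodings, included here for illustration):

#include <cstring>
#include <cstdio>

// push rbp = 55, movq rbp,rsp = 48 89 e5, push rsi = 56, push rdi = 57.
static const unsigned char kYoungPrologue[6] = {0x55, 0x48, 0x89, 0xe5, 0x56, 0x57};

bool IsYoungSequence(const unsigned char* code) {
  return std::memcmp(code, kYoungPrologue, sizeof(kYoungPrologue)) == 0;
}

int main() {
  unsigned char young[6] = {0x55, 0x48, 0x89, 0xe5, 0x56, 0x57};
  unsigned char aged[6]  = {0xe8, 0x00, 0x00, 0x00, 0x00, 0x90};  // call rel32; nop
  printf("%d %d\n", IsYoungSequence(young), IsYoungSequence(aged));  // 1 0
  return 0;
}
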
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- sequence++; // Skip the kCallOpcode byte
- Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
- Assembler::kCallTargetAddressOffset;
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length);
- patcher.masm()->call(stub->instruction_start());
- patcher.masm()->nop();
- }
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/codegen-x64.h b/src/3rdparty/v8/src/x64/codegen-x64.h
deleted file mode 100644
index 3a7646b..0000000
--- a/src/3rdparty/v8/src/x64/codegen-x64.h
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_CODEGEN_X64_H_
-#define V8_X64_CODEGEN_X64_H_
-
-#include "ast.h"
-#include "ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations
-class CompilationInfo;
-
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-// -------------------------------------------------------------------------
-// CodeGenerator
-
-class CodeGenerator: public AstVisitor {
- public:
- CodeGenerator() {
- InitializeAstVisitor();
- }
-
- static bool MakeCode(CompilationInfo* info);
-
- // Printing of AST, etc. as requested by flags.
- static void MakeCodePrologue(CompilationInfo* info);
-
- // Allocate and install the code.
- static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
- CompilationInfo* info);
-
- // Print the code after compiling it.
- static void PrintCode(Handle<Code> code, CompilationInfo* info);
-
- static bool ShouldGenerateLog(Expression* type);
-
- static bool RecordPositions(MacroAssembler* masm,
- int pos,
- bool right_here = false);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
-};
-
-
-class StringCharLoadGenerator : public AllStatic {
- public:
- // Generates the code for handling different string types and loading the
- // indexed character into |result|. We expect |index| as untagged input and
- // |result| as untagged output.
- static void Generate(MacroAssembler* masm,
- Register string,
- Register index,
- Register result,
- Label* call_runtime);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_CODEGEN_X64_H_
diff --git a/src/3rdparty/v8/src/x64/cpu-x64.cc b/src/3rdparty/v8/src/x64/cpu-x64.cc
deleted file mode 100644
index 80e22c6..0000000
--- a/src/3rdparty/v8/src/x64/cpu-x64.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// CPU specific code for x64 independent of OS goes here.
-
-#if defined(__GNUC__) && !defined(__MINGW64__)
-#include "third_party/valgrind/valgrind.h"
-#endif
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "cpu.h"
-#include "macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-void CPU::SetUp() {
- CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
- return true; // Yay!
-}
-
-
-void CPU::FlushICache(void* start, size_t size) {
- // No need to flush the instruction cache on Intel. On Intel, instruction
- // cache flushing is only necessary when multiple cores are running the same
- // code simultaneously. V8 (and JavaScript) is single-threaded, and when code
- // is patched on an Intel CPU the core performing the patching will have its
- // own instruction cache updated automatically.
-
- // If flushing of the instruction cache becomes necessary Windows has the
- // API function FlushInstructionCache.
-
- // By default, valgrind only checks the stack for writes that might need to
- // invalidate already cached translated code. This leads to random
- // instability when code patches or moves sometimes go unnoticed. One
- // solution is to run valgrind with --smc-check=all, but this comes at a big
- // performance cost. We can notify valgrind to invalidate its cache.
-#ifdef VALGRIND_DISCARD_TRANSLATIONS
- unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
- USE(res);
-#endif
-}
-
-
-void CPU::DebugBreak() {
-#ifdef _MSC_VER
- // To avoid depending on the Visual Studio runtime, the following code can
- // be used instead:
- // __asm { int 3 }
- __debugbreak();
-#else
- asm("int $3");
-#endif
-}
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/debug-x64.cc b/src/3rdparty/v8/src/x64/debug-x64.cc
deleted file mode 100644
index 1b29e58..0000000
--- a/src/3rdparty/v8/src/x64/debug-x64.cc
+++ /dev/null
@@ -1,354 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "assembler.h"
-#include "codegen.h"
-#include "debug.h"
-
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x64.cc
- // for the precise return instruction sequence.
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- ASSERT(Assembler::kJSReturnSequenceLength >=
- Assembler::kCallInstructionLength);
- rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_return()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
-}
-
-
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceLength);
-}
-
-
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return !Assembler::IsNop(rinfo()->pc());
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCodeWithCall(
- Isolate::Current()->debug()->debug_break_slot()->entry(),
- Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
-}
-
-
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- ASSERT(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
-}
-
-const bool Debug::FramePaddingLayout::kIsSupported = true;
-
-
-#define __ ACCESS_MASM(masm)
-
-
-static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
- RegList object_regs,
- RegList non_object_regs,
- bool convert_call_to_jmp) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Load padding words on stack.
- for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
- __ Push(Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue));
- }
- __ Push(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize));
-
- // Store the registers containing live values on the expression stack to
- // make sure that these are correctly updated during GC. Non-object values
- // are stored as two smis, causing them to be left untouched by the GC.
- ASSERT((object_regs & ~kJSCallerSaved) == 0);
- ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
- ASSERT((object_regs & non_object_regs) == 0);
- for (int i = 0; i < kNumJSCallerSaved; i++) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- ASSERT(!reg.is(kScratchRegister));
- if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
- }
- // Store the 64-bit value as two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ movq(kScratchRegister, reg);
- __ Integer32ToSmi(reg, reg);
- __ push(reg);
- __ sar(kScratchRegister, Immediate(32));
- __ Integer32ToSmi(kScratchRegister, kScratchRegister);
- __ push(kScratchRegister);
- }
- }
-
-#ifdef DEBUG
- __ RecordComment("// Calling from debug break to runtime - come in - over");
-#endif
- __ Set(rax, 0); // No arguments (argc == 0).
- __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
-
- CEntryStub ceb(1);
- __ CallStub(&ceb);
-
- // Restore the register values from the expression stack.
- for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
- int r = JSCallerSavedCode(i);
- Register reg = { r };
- if (FLAG_debug_code) {
- __ Set(reg, kDebugZapValue);
- }
- if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
- }
- // Reconstruct the 64-bit value from two smis.
- if ((non_object_regs & (1 << r)) != 0) {
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ shl(kScratchRegister, Immediate(32));
- __ pop(reg);
- __ SmiToInteger32(reg, reg);
- __ or_(reg, kScratchRegister);
- }
- }
-
- // Read current padding counter and skip corresponding number of words.
- __ pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
-
- // Get rid of the internal frame.
- }
-
- // If this call did not replace a call but patched other code then there will
- // be an unwanted return address left on the stack. Here we get rid of that.
- if (convert_call_to_jmp) {
- __ addq(rsp, Immediate(kPointerSize));
- }
-
- // Now that the break point has been handled, resume normal execution by
- // jumping to the target address intended by the caller and that was
- // overwritten by the address of DebugBreakXXX.
- ExternalReference after_break_target =
- ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
- __ movq(kScratchRegister, after_break_target);
- __ jmp(Operand(kScratchRegister, 0));
-}
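
The helper above keeps raw 64-bit register values alive across the runtime call by pushing them as two smi-tagged 32-bit halves, so the GC never mistakes them for heap pointers, then reassembles them afterwards. A standalone sketch of that split/reconstruct trick, assuming the x64 smi tagging of the time (payload shifted into the upper 32 bits):

#include <cassert>
#include <cstdint>

const int kSmiShift = 32;

void SplitToSmis(uint64_t value, int64_t* low_smi, int64_t* high_smi) {
  *low_smi  = int64_t(uint32_t(value)) << kSmiShift;        // low 32 bits, tagged
  *high_smi = int64_t(uint32_t(value >> 32)) << kSmiShift;  // high 32 bits, tagged
}

uint64_t JoinFromSmis(int64_t low_smi, int64_t high_smi) {
  uint32_t low  = uint32_t(uint64_t(low_smi) >> kSmiShift);
  uint32_t high = uint32_t(uint64_t(high_smi) >> kSmiShift);
  return (uint64_t(high) << 32) | low;
}

int main() {
  uint64_t v = 0x123456789ABCDEF0ull;
  int64_t lo, hi;
  SplitToSmis(v, &lo, &hi);
  assert(JoinFromSmis(lo, hi) == v);
  return 0;
}
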
-
-
-void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for IC load call (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), 0, false);
-}
-
-
-void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for IC store call (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false);
-}
-
-
-void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC load call (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit() | rdx.bit(), 0, false);
-}
-
-
-void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
- // Register state for keyed IC store call (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -----------------------------------
- Generate_DebugBreakCallHelper(
- masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
- // Register state for the call IC (from ic-x64.cc).
- // ----------- S t a t e -------------
- // -- rcx: function name
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rcx.bit(), 0, false);
-}
-
-
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
- // Register state just before return from JS function (from codegen-x64.cc).
- // ----------- S t a t e -------------
- // -- rax: return value
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rax.bit(), 0, true);
-}
-
-
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-x64.cc).
- // ----------- S t a t e -------------
- // -- rdi : function
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rdi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallFunctionStub (from code-stubs-x64.cc).
- // ----------- S t a t e -------------
- // -- rdi : function
- // -- rbx: cache cell for call target
- // -----------------------------------
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-x64.cc).
- // rax is the actual number of arguments, not encoded as a smi; see the
- // comment above the IC call.
- // ----------- S t a t e -------------
- // -- rax: number of arguments
- // -----------------------------------
- // The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rdi.bit(), rax.bit(), false);
-}
-
-
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
- // Register state for CallConstructStub (from code-stubs-x64.cc).
- // rax is the actual number of arguments, not encoded as a smi; see the
- // comment above the IC call.
- // ----------- S t a t e -------------
- // -- rax: number of arguments
- // -- rbx: cache cell for call target
- // -----------------------------------
- // The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), rax.bit(), false);
-}
-
-
-void Debug::GenerateSlot(MacroAssembler* masm) {
- // Generate enough nop's to make space for a call instruction.
- Label check_codesize;
- __ bind(&check_codesize);
- __ RecordDebugBreakSlot();
- __ Nop(Assembler::kDebugBreakSlotLength);
- ASSERT_EQ(Assembler::kDebugBreakSlotLength,
- masm->SizeOfCodeGeneratedSince(&check_codesize));
-}
-
-
-void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
- // In the places where a debug break slot is inserted, no registers can contain
- // object pointers.
- Generate_DebugBreakCallHelper(masm, 0, 0, true);
-}
-
-
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
-void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference(Debug_Address::RestarterFrameFunctionPointer(),
- masm->isolate());
- __ movq(rax, restarter_frame_function_slot);
- __ movq(Operand(rax, 0), Immediate(0));
-
- // We do not know our frame height, but set rsp based on rbp.
- __ lea(rsp, Operand(rbp, -1 * kPointerSize));
-
- __ pop(rdi); // Function.
- __ pop(rbp);
-
- // Load context from the function.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Get function code.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
-
- // Re-run JSFunction, rdi is function, rsi is context.
- __ jmp(rdx);
-}
-
-const bool Debug::kFrameDropperSupported = true;
-
-#undef __
-
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/deoptimizer-x64.cc b/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
deleted file mode 100644
index 8e776f9..0000000
--- a/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
+++ /dev/null
@@ -1,1076 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen.h"
-#include "deoptimizer.h"
-#include "full-codegen.h"
-#include "safepoint-table.h"
-
-namespace v8 {
-namespace internal {
-
-
-const int Deoptimizer::table_entry_size_ = 10;
-
-
-int Deoptimizer::patch_size() {
- return Assembler::kCallInstructionLength;
-}
-
-
-void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
- JSFunction* function) {
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
-
- ASSERT(function->IsOptimized());
- ASSERT(function->FunctionsInFunctionListShareSameCode());
-
- // The optimized code is going to be patched, so we cannot use it
- // any more. Play safe and reset the whole cache.
- function->shared()->ClearOptimizedCodeMap();
-
- // Get the optimized code.
- Code* code = function->code();
-
- // Invalidate the relocation information, as it will become invalid by the
- // code patching below, and is not needed any more.
- code->InvalidateRelocation();
-
- // For each LLazyBailout instruction insert an absolute call to the
- // corresponding deoptimization entry, or a short call to an absolute
- // jump if space is short. The absolute jumps are put in a table just
- // before the safepoint table (space was allocated there when the Code
- // object was created, if necessary).
-
- Address instruction_start = function->code()->instruction_start();
-#ifdef DEBUG
- Address prev_call_address = NULL;
-#endif
- DeoptimizationInputData* deopt_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
- // Position where Call will be patched in.
- Address call_address = instruction_start + deopt_data->Pc(i)->value();
- // There is room enough to write a long call instruction because we pad
- // LLazyBailout instructions with nops if necessary.
- CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
- patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY),
- RelocInfo::NONE64);
- ASSERT(prev_call_address == NULL ||
- call_address >= prev_call_address + patch_size());
- ASSERT(call_address + patch_size() <= code->instruction_end());
-#ifdef DEBUG
- prev_call_address = call_address;
-#endif
- }
-
- // Add the deoptimizing code to the list.
- DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
- DeoptimizerData* data = isolate->deoptimizer_data();
- node->set_next(data->deoptimizing_code_list_);
- data->deoptimizing_code_list_ = node;
-
- // We might be in the middle of incremental marking with compaction.
- // Tell collector to treat this code object in a special way and
- // ignore all slots that might have been recorded on it.
- isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
- ReplaceCodeForRelatedFunctions(function, code);
-
- if (FLAG_trace_deopt) {
- PrintF("[forced deoptimization: ");
- function->PrintName();
- PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
- }
-}
-
-
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x1f;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(check_code->entry(),
- Assembler::target_address_at(call_target_address));
- // The back edge bookkeeping code matches the pattern:
- //
- // add <profiling_counter>, <-delta>
- // jns ok
- // call <stack guard>
- // test rax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // add <profiling_counter>, <-delta> ;; Not changed
- // nop
- // nop
- // call <on-stack replacement>
- // test rax, <loop nesting depth>
- // ok:
- //
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT(replacement_code->entry() ==
- Assembler::target_address_at(call_target_address));
- // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
- // restore the conditional branch.
- ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- Assembler::set_target_address_at(call_target_address,
- check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, check_code);
-}
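
PatchStackCheckCodeAt and RevertStackCheckCodeAt toggle two bytes just in front of the back-edge call: the "jns ok" short branch (0x79 0x1f) is overwritten with a two-byte nop (0x66 0x90) so the stack-guard call always runs and can trigger on-stack replacement, and the revert puts the branch back. A hedged sketch of the same two-byte patch over a plain byte buffer, using the same offsets relative to the call's 4-byte operand (retargeting the call itself is left out):

#include <cassert>

static const unsigned char kJnsInstruction = 0x79;  // jns
static const unsigned char kJnsOffset      = 0x1f;
static const unsigned char kNopByteOne     = 0x66;  // two-byte nop: 66 90
static const unsigned char kNopByteTwo     = 0x90;

// call_target_address points at the call's 4-byte operand, as in the code above.
void PatchBackEdge(unsigned char* call_target_address, bool enable_osr) {
  call_target_address[-3] = enable_osr ? kNopByteOne : kJnsInstruction;
  call_target_address[-2] = enable_osr ? kNopByteTwo : kJnsOffset;
  // call_target_address[-1] stays 0xe8 (the call opcode); only the branch
  // in front of it is flipped.
}

int main() {
  //                add <counter>,<-delta>  jns +0x1f    call rel32
  unsigned char code[10] = {0x00, 0x00, 0x00, 0x79, 0x1f, 0xe8, 0, 0, 0, 0};
  unsigned char* call_target_address = code + 6;
  PatchBackEdge(call_target_address, true);
  assert(code[3] == kNopByteOne && code[4] == kNopByteTwo);
  PatchBackEdge(call_target_address, false);
  assert(code[3] == kJnsInstruction && code[4] == kJnsOffset);
  return 0;
}
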
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
- ByteArray* translations = data->TranslationByteArray();
- int length = data->DeoptCount();
- for (int i = 0; i < length; i++) {
- if (data->AstId(i) == ast_id) {
- TranslationIterator it(translations, data->TranslationIndex(i)->value());
- int value = it.Next();
- ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
- // Read the number of frames.
- value = it.Next();
- if (value == 1) return i;
- }
- }
- UNREACHABLE();
- return -1;
-}
-
-
-void Deoptimizer::DoComputeOsrOutputFrame() {
- DeoptimizationInputData* data = DeoptimizationInputData::cast(
- compiled_code_->deoptimization_data());
- unsigned ast_id = data->OsrAstId()->value();
- // TODO(kasperl): This should not be the bailout_id_. It should be
- // the ast id. Confusing.
- ASSERT(bailout_id_ == ast_id);
-
- int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
- unsigned translation_index = data->TranslationIndex(bailout_id)->value();
- ByteArray* translations = data->TranslationByteArray();
-
- TranslationIterator iterator(translations, translation_index);
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator.Next());
- ASSERT(Translation::BEGIN == opcode);
- USE(opcode);
- int count = iterator.Next();
- iterator.Skip(1); // Drop JS frame count.
- ASSERT(count == 1);
- USE(count);
-
- opcode = static_cast<Translation::Opcode>(iterator.Next());
- USE(opcode);
- ASSERT(Translation::JS_FRAME == opcode);
- unsigned node_id = iterator.Next();
- USE(node_id);
- ASSERT(node_id == ast_id);
- int closure_id = iterator.Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- unsigned height = iterator.Next();
- unsigned height_in_bytes = height * kPointerSize;
- USE(height_in_bytes);
-
- unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = input_->GetFrameSize();
- ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
- unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
- unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
- unsigned outgoing_size = outgoing_height * kPointerSize;
- unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
- ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => node=%u, frame=%d->%d]\n",
- ast_id,
- input_frame_size,
- output_frame_size);
- }
-
- // There's only one output frame in the OSR case.
- output_count_ = 1;
- output_ = new FrameDescription*[1];
- output_[0] = new(output_frame_size) FrameDescription(
- output_frame_size, function_);
- output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- // Clear the incoming parameters in the optimized frame to avoid
- // confusing the garbage collector.
- unsigned output_offset = output_frame_size - kPointerSize;
- int parameter_count = function_->shared()->formal_parameter_count() + 1;
- for (int i = 0; i < parameter_count; ++i) {
- output_[0]->SetFrameSlot(output_offset, 0);
- output_offset -= kPointerSize;
- }
-
- // Translate the incoming parameters. This may overwrite some of the
- // incoming argument slots we've just cleared.
- int input_offset = input_frame_size - kPointerSize;
- bool ok = true;
- int limit = input_offset - (parameter_count * kPointerSize);
- while (ok && input_offset > limit) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Set them up explicitly.
- for (int i = StandardFrameConstants::kCallerPCOffset;
- ok && i >= StandardFrameConstants::kMarkerOffset;
- i -= kPointerSize) {
- intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_osr) {
- const char* name = "UNKNOWN";
- switch (i) {
- case StandardFrameConstants::kCallerPCOffset:
- name = "caller's pc";
- break;
- case StandardFrameConstants::kCallerFPOffset:
- name = "fp";
- break;
- case StandardFrameConstants::kContextOffset:
- name = "context";
- break;
- case StandardFrameConstants::kMarkerOffset:
- name = "function";
- break;
- }
- PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] "
- "(fixed part - %s)\n",
- output_offset,
- input_value,
- input_offset,
- name);
- }
- output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
- input_offset -= kPointerSize;
- output_offset -= kPointerSize;
- }
-
- // Translate the rest of the frame.
- while (ok && input_offset >= 0) {
- ok = DoOsrTranslateCommand(&iterator, &input_offset);
- }
-
- // If translation of any command failed, continue using the input frame.
- if (!ok) {
- delete output_[0];
- output_[0] = input_;
- output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
- } else {
- // Set up the frame pointer and the context pointer.
- output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
- output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
-
- unsigned pc_offset = data->OsrPcOffset()->value();
- intptr_t pc = reinterpret_cast<intptr_t>(
- compiled_code_->entry() + pc_offset);
- output_[0]->SetPc(pc);
- }
- Code* continuation =
- function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
- output_[0]->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
-
- if (FLAG_trace_osr) {
- PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
- ok ? "finished" : "aborted",
- reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
- PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
- }
-}
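// [Editor's annotation -- not part of the original file.]
// A minimal sketch of the OSR frame-size bookkeeping used above, with assumed
// example values; only kPointerSize = 8 reflects real x64, everything else is
// a hypothetical placeholder.
#include <cassert>
#include <cstdio>

int main() {
  const unsigned kPointerSize = 8;
  unsigned fixed_size = 5 * kPointerSize;       // assumed: params + fixed JS frame part
  unsigned height = 3;                          // unoptimized expression-stack height
  unsigned height_in_bytes = height * kPointerSize;
  unsigned input_frame_size = fixed_size + height_in_bytes;   // as asserted above
  unsigned stack_slot_size = 4 * kPointerSize;  // assumed spill slots of the optimized code
  unsigned outgoing_size = 0;                   // OSR never happens in the middle of a call
  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
  assert(fixed_size + height_in_bytes == input_frame_size);
  std::printf("input=%u output=%u\n", input_frame_size, output_frame_size);
  return 0;
}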
-
-
-void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
- int frame_index) {
- //
- // FROM TO
- // | .... | | .... |
- // +-------------------------+ +-------------------------+
- // | JSFunction continuation | | JSFunction continuation |
- // +-------------------------+ +-------------------------+
- // | | saved frame (rbp) | | saved frame (rbp) |
- // | +=========================+<-rbp +=========================+<-rbp
- // | | JSFunction context | | JSFunction context |
- // v +-------------------------+ +-------------------------|
- // | COMPILED_STUB marker | | STUB_FAILURE marker |
- // +-------------------------+ +-------------------------+
- // | | | caller args.arguments_ |
- // | ... | +-------------------------+
- // | | | caller args.length_ |
- // |-------------------------|<-rsp +-------------------------+
- // | caller args pointer |
- // +-------------------------+
- // | caller stack param 1 |
- // parameters in registers +-------------------------+
- // and spilled to stack | .... |
- // +-------------------------+
- // | caller stack param n |
- // +-------------------------+<-rsp
- // rax = number of parameters
- // rbx = failure handler address
- // rbp = saved frame
- // rsi = JSFunction context
- //
-
- ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
- int major_key = compiled_code_->major_key();
- CodeStubInterfaceDescriptor* descriptor =
- isolate_->code_stub_interface_descriptor(major_key);
-
- // The output frame must have room for all pushed register parameters
- // and the standard stack frame slots. Include space for an argument
- // object to the callee and optionally the space to pass the argument
- // object to the stub failure handler.
- int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
- sizeof(Arguments) + kPointerSize;
- int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
- int input_frame_size = input_->GetFrameSize();
- int output_frame_size = height_in_bytes + fixed_frame_size;
- if (trace_) {
- PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
- CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
- height_in_bytes);
- }
-
- // The stub failure trampoline is a single frame.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, NULL);
- output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
- ASSERT(frame_index == 0);
- output_[frame_index] = output_frame;
-
- // The top address for the output frame can be computed from the input
- // frame pointer and the output frame's height. Subtract space for the
- // context and function slots.
- intptr_t top_address = input_->GetRegister(rbp.code()) - (2 * kPointerSize) -
- height_in_bytes;
- output_frame->SetTop(top_address);
-
- // Read caller's PC (JSFunction continuation) from the input frame.
- unsigned input_frame_offset = input_frame_size - kPointerSize;
- unsigned output_frame_offset = output_frame_size - kPointerSize;
- intptr_t value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Read caller's FP from the input frame, and set this frame's FP.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- intptr_t frame_ptr = input_->GetRegister(rbp.code());
- output_frame->SetRegister(rbp.code(), frame_ptr);
- output_frame->SetFp(frame_ptr);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // The context can be gotten from the input frame.
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetRegister(rsi.code(), value);
- output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; context\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_frame_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; function (stub failure sentinel)\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- intptr_t caller_arg_count = 0;
- if (descriptor->stack_parameter_count_ != NULL) {
- caller_arg_count =
- input_->GetRegister(descriptor->stack_parameter_count_->code());
- }
-
- // Build the Arguments object for the caller's parameters and a pointer to it.
- output_frame_offset -= kPointerSize;
- value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
- (caller_arg_count - 1) * kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; args.arguments\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = caller_arg_count;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; args.length\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- output_frame_offset -= kPointerSize;
- value = frame_ptr - (output_frame_size - output_frame_offset) -
- StandardFrameConstants::kMarkerOffset + kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; args*\n",
- top_address + output_frame_offset, output_frame_offset, value);
- }
-
- // Copy the register parameters to the failure frame.
- for (int i = 0; i < descriptor->register_param_count_; ++i) {
- output_frame_offset -= kPointerSize;
- DoTranslateCommand(iterator, 0, output_frame_offset);
- }
-
- ASSERT(0 == output_frame_offset);
-
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
- double double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
- }
-
- intptr_t handler =
- reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
- int params = descriptor->register_param_count_;
- if (descriptor->stack_parameter_count_ != NULL) {
- params++;
- }
- output_frame->SetRegister(rax.code(), params);
- output_frame->SetRegister(rbx.code(), handler);
-
- // Compute this frame's PC, state, and continuation.
- Code* trampoline = NULL;
- int extra = descriptor->extra_expression_stack_count_;
- StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
- ASSERT(trampoline != NULL);
- output_frame->SetPc(reinterpret_cast<intptr_t>(
- trampoline->instruction_start()));
- output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
- Code* notify_failure =
- isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(notify_failure->entry()));
-}
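// [Editor's annotation -- not part of the original file.]
// Sketch of how the "args.arguments" slot above is derived: it points at the
// highest-addressed caller stack parameter. kCallerSPOffset is assumed here to
// be 2 * kPointerSize on x64 (saved rbp plus the return address sit above rbp).
#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t kPointerSize = 8;
  const intptr_t kCallerSPOffset = 2 * kPointerSize;  // assumption for this sketch
  intptr_t frame_ptr = 0x7fff0000;                    // hypothetical rbp of the stub frame
  intptr_t caller_arg_count = 3;
  intptr_t arguments_ptr =
      frame_ptr + kCallerSPOffset + (caller_arg_count - 1) * kPointerSize;
  std::printf("args.arguments_ = 0x%llx\n",
              static_cast<unsigned long long>(arguments_ptr));
  return 0;
}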
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
- int frame_index) {
- Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating construct stub => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = 7 * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
- // The construct stub frame cannot be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- intptr_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; function (construct sentinel)\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The output frame reflects a JSConstructStubGeneric frame.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(construct_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
- }
-
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; allocated receiver\n",
- top_address + output_offset, output_offset, value);
- }
-
- ASSERT(0 == output_offset);
-
- intptr_t pc = reinterpret_cast<intptr_t>(
- construct_stub->instruction_start() +
- isolate_->heap()->construct_stub_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
-}
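// [Editor's annotation -- not part of the original file.]
// The 7 * kPointerSize fixed part of the construct frame built above holds,
// from highest to lowest address: caller's pc, caller's fp, context, the
// CONSTRUCT marker, the code object (JSConstructStubGeneric), argc, and the
// allocated receiver. A sketch of the same top-down offset walk, with an
// assumed height:
#include <cassert>

int main() {
  const unsigned kPointerSize = 8;
  const unsigned kFixedSlots = 7;
  unsigned height = 4;  // assumed: incoming parameters including the receiver
  unsigned output_frame_size = height * kPointerSize + kFixedSlots * kPointerSize;
  unsigned output_offset = output_frame_size;
  for (unsigned i = 0; i < height + kFixedSlots; ++i) output_offset -= kPointerSize;
  assert(output_offset == 0);  // mirrors ASSERT(0 == output_offset) above
  return 0;
}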
-
-
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
- int frame_index) {
- BailoutId node_id = BailoutId(iterator->Next());
- JSFunction* function;
- if (frame_index != 0) {
- function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- } else {
- int closure_id = iterator->Next();
- USE(closure_id);
- ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
- function = function_;
- }
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (trace_) {
- PrintF(" translating ");
- function->PrintName();
- PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
- }
-
- // The 'fixed' part of the frame consists of the incoming parameters and
- // the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = input_->GetFrameSize();
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- ASSERT(frame_index >= 0 && frame_index < output_count_);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address for the bottommost output frame can be computed from
- // the input frame pointer and the output frame's height. For all
- // subsequent output frames, it can be computed from the previous one's
- // top address and the current frame's size.
- intptr_t top_address;
- if (is_bottommost) {
- // 2 = context and function in the frame.
- top_address =
- input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes;
- } else {
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- }
- output_frame->SetTop(top_address);
-
- // Compute the incoming parameter translation.
- int parameter_count = function->shared()->formal_parameter_count() + 1;
- unsigned output_offset = output_frame_size;
- unsigned input_offset = input_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- input_offset -= (parameter_count * kPointerSize);
-
- // There are no translation commands for the caller's pc and fp, the
- // context, and the function. Synthesize their values and set them up
- // explicitly.
- //
- // The caller's pc for the bottommost output frame is the same as in the
- // input frame. For all subsequent output frames, it can be read from the
- // previous one. This frame's pc can be computed from the non-optimized
- // function code and AST id of the bailout.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- intptr_t value;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetPc();
- }
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The caller's frame pointer for the bottommost output frame is the same
- // as in the input frame. For all subsequent output frames, it can be
- // read from the previous one. Also compute and set this frame's frame
- // pointer.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = output_[frame_index - 1]->GetFp();
- }
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
- output_frame->SetFp(fp_value);
- if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // For the bottommost output frame the context can be gotten from the input
- // frame. For all subsequent output frames it can be gotten from the function
- // so long as we don't inline functions that need local contexts.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- if (is_bottommost) {
- value = input_->GetFrameSlot(input_offset);
- } else {
- value = reinterpret_cast<intptr_t>(function->context());
- }
- output_frame->SetFrameSlot(output_offset, value);
- output_frame->SetContext(value);
- if (is_topmost) output_frame->SetRegister(rsi.code(), value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // The function was mentioned explicitly in the BEGIN_FRAME.
- output_offset -= kPointerSize;
- input_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- // The function for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
- output_frame->SetFrameSlot(output_offset, value);
- if (trace_) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR "; function\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Translate the rest of the frame.
- for (unsigned i = 0; i < height; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
- ASSERT(0 == output_offset);
-
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
- FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
-
- // Set the continuation for the topmost frame.
- if (is_topmost && bailout_type_ != DEBUGGER) {
- Code* continuation = (bailout_type_ == EAGER)
- ? isolate_->builtins()->builtin(Builtins::kNotifyDeoptimized)
- : isolate_->builtins()->builtin(Builtins::kNotifyLazyDeoptimized);
- output_frame->SetContinuation(
- reinterpret_cast<intptr_t>(continuation->entry()));
- }
-}
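// [Editor's annotation -- not part of the original file.]
// GetOutputInfo() above returns one packed word carrying both the pc offset and
// the FullCodeGenerator state, which PcField/StateField then decode. The exact
// bit widths are V8-internal; the 8/24 split below is only an assumption made
// for illustration.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t pc_and_state = (0x1234u << 8) | 0x01u;  // hypothetical packed value
  uint32_t state = pc_and_state & 0xFFu;           // assumed: low 8 bits hold the state
  uint32_t pc_offset = pc_and_state >> 8;          // assumed: remaining bits hold the pc offset
  std::printf("state=%u pc_offset=0x%x\n", state, pc_offset);
  return 0;
}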
-
-
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee-saved registers in JavaScript frames, so all registers are
- // spilled. Registers rbp and rsp are set to the correct values, though.
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
- }
-}
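// [Editor's annotation -- not part of the original file.]
// FillInputFrame() above snapshots the live stack one 8-byte slot at a time,
// starting at the top of stack, into the input FrameDescription. An equivalent
// copy, sketched with plain arrays:
#include <cstddef>
#include <cstdint>
#include <cstring>

void Snapshot(const uint64_t* tos, uint64_t* slots, std::size_t frame_size_bytes) {
  std::memcpy(slots, tos, frame_size_bytes);  // one slot per kPointerSize step
}

int main() {
  uint64_t stack[4] = {1, 2, 3, 4};
  uint64_t copy[4] = {0, 0, 0, 0};
  Snapshot(stack, copy, sizeof(stack));
  return copy[3] == 4 ? 0 : 1;
}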
-
-
-#define __ masm()->
-
-void Deoptimizer::EntryGenerator::Generate() {
- GeneratePrologue();
-
- // Save all general purpose registers before messing with them.
- const int kNumberOfRegisters = Register::kNumRegisters;
-
- const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::NumAllocatableRegisters();
- __ subq(rsp, Immediate(kDoubleRegsSize));
-
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movsd(Operand(rsp, offset), xmm_reg);
- }
-
- // We push all registers onto the stack, even though not all of them will
- // need to be restored later.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- Register r = Register::from_code(i);
- __ push(r);
- }
-
- const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
- kDoubleRegsSize;
-
- // When calling new_deoptimizer_function, the last argument is passed on the
- // stack on Windows and in r8 on Linux. The remaining arguments are all
- // passed in registers (different ones on Linux and Windows, though).
-
-#ifdef _WIN64
- Register arg4 = r9;
- Register arg3 = r8;
- Register arg2 = rdx;
- Register arg1 = rcx;
-#else
- Register arg4 = rcx;
- Register arg3 = rdx;
- Register arg2 = rsi;
- Register arg1 = rdi;
-#endif
-
- // We use this register to hold the fifth argument temporarily. Unfortunately
- // we can't store it directly in r8 (used to pass it on Linux), since r8 is
- // another parameter-passing register on Windows.
- Register arg5 = r11;
-
- // Get the bailout id from the stack.
- __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize));
-
- // Get the address of the location in the code object if possible
- // and compute the fp-to-sp delta in register arg5.
- if (type() == EAGER) {
- __ Set(arg4, 0);
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
-
- __ subq(arg5, rbp);
- __ neg(arg5);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(6);
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(arg1, rax);
- __ Set(arg2, type());
- // Args 3 and 4 are already in the right registers.
-
- // On Windows, put the arguments on the stack (PrepareCallCFunction
- // has created space for this). On Linux, pass the arguments in r8 and r9.
-#ifdef _WIN64
- __ movq(Operand(rsp, 4 * kPointerSize), arg5);
- __ LoadAddress(arg5, ExternalReference::isolate_address());
- __ movq(Operand(rsp, 5 * kPointerSize), arg5);
-#else
- __ movq(r8, arg5);
- __ LoadAddress(r9, ExternalReference::isolate_address());
-#endif
-
- Isolate* isolate = masm()->isolate();
-
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
- }
- // Preserve deoptimizer object in register rax and get the input
- // frame descriptor pointer.
- __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
-
- // Fill in the input registers.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(rbx, offset));
- }
-
- // Fill in the double input registers.
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- __ pop(Operand(rbx, dst_offset));
- }
-
- // Remove the bailout id from the stack.
- if (type() == EAGER) {
- __ addq(rsp, Immediate(kPointerSize));
- } else {
- __ addq(rsp, Immediate(2 * kPointerSize));
- }
-
- // Compute a pointer to the unwinding limit in register rcx; that is
- // the first stack slot not part of the input frame.
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ addq(rcx, rsp);
-
- // Unwind the stack down to - but not including - the unwinding
- // limit and copy the contents of the activation frame to the input
- // frame description.
- __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
- Label pop_loop_header;
- __ jmp(&pop_loop_header);
- Label pop_loop;
- __ bind(&pop_loop);
- __ pop(Operand(rdx, 0));
- __ addq(rdx, Immediate(sizeof(intptr_t)));
- __ bind(&pop_loop_header);
- __ cmpq(rcx, rsp);
- __ j(not_equal, &pop_loop);
-
- // Compute the output frame in the deoptimizer.
- __ push(rax);
- __ PrepareCallCFunction(2);
- __ movq(arg1, rax);
- __ LoadAddress(arg2, ExternalReference::isolate_address());
- {
- AllowExternalCallThatCantCauseGC scope(masm());
- __ CallCFunction(
- ExternalReference::compute_output_frames_function(isolate), 2);
- }
- __ pop(rax);
-
- // Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
- // Outer loop state: rax = current FrameDescription**, rdx = one past the
- // last FrameDescription**.
- __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
- __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
- __ lea(rdx, Operand(rax, rdx, times_8, 0));
- __ jmp(&outer_loop_header);
- __ bind(&outer_push_loop);
- // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
- __ movq(rbx, Operand(rax, 0));
- __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ jmp(&inner_loop_header);
- __ bind(&inner_push_loop);
- __ subq(rcx, Immediate(sizeof(intptr_t)));
- __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
- __ bind(&inner_loop_header);
- __ testq(rcx, rcx);
- __ j(not_zero, &inner_push_loop);
- __ addq(rax, Immediate(kPointerSize));
- __ bind(&outer_loop_header);
- __ cmpq(rax, rdx);
- __ j(below, &outer_push_loop);
-
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(rbx, src_offset));
- }
-
- // Push state, pc, and continuation from the last output frame.
- if (type() != OSR) {
- __ push(Operand(rbx, FrameDescription::state_offset()));
- }
- __ push(Operand(rbx, FrameDescription::pc_offset()));
- __ push(Operand(rbx, FrameDescription::continuation_offset()));
-
- // Push the registers from the last output frame.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(rbx, offset));
- }
-
- // Restore the registers from the stack.
- for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- Register r = Register::from_code(i);
- // Do not restore rsp; simply pop the value into the next register
- // and overwrite it afterwards.
- if (r.is(rsp)) {
- ASSERT(i > 0);
- r = Register::from_code(i - 1);
- }
- __ pop(r);
- }
-
- // Set up the roots register.
- __ InitializeRootRegister();
- __ InitializeSmiConstantRegister();
-
- // Return to the continuation point.
- __ ret(0);
-}
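// [Editor's annotation -- not part of the original file.]
// The arg1..arg5 register aliases chosen near the top of Generate() follow the
// two x64 C calling conventions: Win64 passes the first four integer arguments
// in rcx, rdx, r8, r9 (the rest on the stack, after a 32-byte shadow area),
// while the System V ABI used on Linux passes the first six in rdi, rsi, rdx,
// rcx, r8, r9. A small reference table:
#include <cstdio>

int main() {
  const char* win64[] = {"rcx", "rdx", "r8", "r9"};
  const char* sysv[] = {"rdi", "rsi", "rdx", "rcx", "r8", "r9"};
  for (int i = 0; i < 6; ++i) {
    std::printf("arg%d: win64=%-5s sysv=%s\n", i + 1,
                i < 4 ? win64[i] : "stack", sysv[i]);
  }
  return 0;
}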
-
-
-void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ push_imm32(i);
- __ jmp(&done);
- ASSERT(masm()->pc_offset() - start == table_entry_size_);
- }
- __ bind(&done);
-}
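// [Editor's annotation -- not part of the original file.]
// Every table entry emitted above has the same size (a push of the entry index
// followed by a jump to the shared tail), so a bailout id maps to its entry by
// simple arithmetic. The entry size below is an assumed placeholder, not the
// real table_entry_size_.
#include <cstdint>
#include <cstdio>

uintptr_t EntryAddress(uintptr_t table_base, int bailout_id, int entry_size) {
  return table_base + static_cast<uintptr_t>(bailout_id) * entry_size;
}

int main() {
  const int kAssumedEntrySize = 10;  // placeholder value
  std::printf("entry 3 at base+%llu\n",
              static_cast<unsigned long long>(EntryAddress(0, 3, kAssumedEntrySize)));
  return 0;
}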
-
-#undef __
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/disasm-x64.cc b/src/3rdparty/v8/src/x64/disasm-x64.cc
deleted file mode 100644
index fb0914d..0000000
--- a/src/3rdparty/v8/src/x64/disasm-x64.cc
+++ /dev/null
@@ -1,1869 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdarg.h>
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "disasm.h"
-#include "lazy-instance.h"
-
-namespace disasm {
-
-enum OperandType {
- UNSET_OP_ORDER = 0,
- // Operand size decides between 16, 32 and 64 bit operands.
- REG_OPER_OP_ORDER = 1, // Register destination, operand source.
- OPER_REG_OP_ORDER = 2, // Operand destination, register source.
- // Fixed 8-bit operands.
- BYTE_SIZE_OPERAND_FLAG = 4,
- BYTE_REG_OPER_OP_ORDER = REG_OPER_OP_ORDER | BYTE_SIZE_OPERAND_FLAG,
- BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG
-};
-
-//------------------------------------------------------------------
-// Tables
-//------------------------------------------------------------------
-struct ByteMnemonic {
- int b; // -1 terminates, otherwise must be in range (0..255)
- OperandType op_order_;
- const char* mnem;
-};
-
-
-static const ByteMnemonic two_operands_instr[] = {
- { 0x00, BYTE_OPER_REG_OP_ORDER, "add" },
- { 0x01, OPER_REG_OP_ORDER, "add" },
- { 0x02, BYTE_REG_OPER_OP_ORDER, "add" },
- { 0x03, REG_OPER_OP_ORDER, "add" },
- { 0x08, BYTE_OPER_REG_OP_ORDER, "or" },
- { 0x09, OPER_REG_OP_ORDER, "or" },
- { 0x0A, BYTE_REG_OPER_OP_ORDER, "or" },
- { 0x0B, REG_OPER_OP_ORDER, "or" },
- { 0x10, BYTE_OPER_REG_OP_ORDER, "adc" },
- { 0x11, OPER_REG_OP_ORDER, "adc" },
- { 0x12, BYTE_REG_OPER_OP_ORDER, "adc" },
- { 0x13, REG_OPER_OP_ORDER, "adc" },
- { 0x18, BYTE_OPER_REG_OP_ORDER, "sbb" },
- { 0x19, OPER_REG_OP_ORDER, "sbb" },
- { 0x1A, BYTE_REG_OPER_OP_ORDER, "sbb" },
- { 0x1B, REG_OPER_OP_ORDER, "sbb" },
- { 0x20, BYTE_OPER_REG_OP_ORDER, "and" },
- { 0x21, OPER_REG_OP_ORDER, "and" },
- { 0x22, BYTE_REG_OPER_OP_ORDER, "and" },
- { 0x23, REG_OPER_OP_ORDER, "and" },
- { 0x28, BYTE_OPER_REG_OP_ORDER, "sub" },
- { 0x29, OPER_REG_OP_ORDER, "sub" },
- { 0x2A, BYTE_REG_OPER_OP_ORDER, "sub" },
- { 0x2B, REG_OPER_OP_ORDER, "sub" },
- { 0x30, BYTE_OPER_REG_OP_ORDER, "xor" },
- { 0x31, OPER_REG_OP_ORDER, "xor" },
- { 0x32, BYTE_REG_OPER_OP_ORDER, "xor" },
- { 0x33, REG_OPER_OP_ORDER, "xor" },
- { 0x38, BYTE_OPER_REG_OP_ORDER, "cmp" },
- { 0x39, OPER_REG_OP_ORDER, "cmp" },
- { 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
- { 0x3B, REG_OPER_OP_ORDER, "cmp" },
- { 0x63, REG_OPER_OP_ORDER, "movsxlq" },
- { 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
- { 0x85, REG_OPER_OP_ORDER, "test" },
- { 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
- { 0x87, REG_OPER_OP_ORDER, "xchg" },
- { 0x88, BYTE_OPER_REG_OP_ORDER, "mov" },
- { 0x89, OPER_REG_OP_ORDER, "mov" },
- { 0x8A, BYTE_REG_OPER_OP_ORDER, "mov" },
- { 0x8B, REG_OPER_OP_ORDER, "mov" },
- { 0x8D, REG_OPER_OP_ORDER, "lea" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static const ByteMnemonic zero_operands_instr[] = {
- { 0xC3, UNSET_OP_ORDER, "ret" },
- { 0xC9, UNSET_OP_ORDER, "leave" },
- { 0xF4, UNSET_OP_ORDER, "hlt" },
- { 0xFC, UNSET_OP_ORDER, "cld" },
- { 0xCC, UNSET_OP_ORDER, "int3" },
- { 0x60, UNSET_OP_ORDER, "pushad" },
- { 0x61, UNSET_OP_ORDER, "popad" },
- { 0x9C, UNSET_OP_ORDER, "pushfd" },
- { 0x9D, UNSET_OP_ORDER, "popfd" },
- { 0x9E, UNSET_OP_ORDER, "sahf" },
- { 0x99, UNSET_OP_ORDER, "cdq" },
- { 0x9B, UNSET_OP_ORDER, "fwait" },
- { 0xA4, UNSET_OP_ORDER, "movs" },
- { 0xA5, UNSET_OP_ORDER, "movs" },
- { 0xA6, UNSET_OP_ORDER, "cmps" },
- { 0xA7, UNSET_OP_ORDER, "cmps" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static const ByteMnemonic call_jump_instr[] = {
- { 0xE8, UNSET_OP_ORDER, "call" },
- { 0xE9, UNSET_OP_ORDER, "jmp" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static const ByteMnemonic short_immediate_instr[] = {
- { 0x05, UNSET_OP_ORDER, "add" },
- { 0x0D, UNSET_OP_ORDER, "or" },
- { 0x15, UNSET_OP_ORDER, "adc" },
- { 0x1D, UNSET_OP_ORDER, "sbb" },
- { 0x25, UNSET_OP_ORDER, "and" },
- { 0x2D, UNSET_OP_ORDER, "sub" },
- { 0x35, UNSET_OP_ORDER, "xor" },
- { 0x3D, UNSET_OP_ORDER, "cmp" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static const char* const conditional_code_suffix[] = {
- "o", "no", "c", "nc", "z", "nz", "na", "a",
- "s", "ns", "pe", "po", "l", "ge", "le", "g"
-};
-
-
-enum InstructionType {
- NO_INSTR,
- ZERO_OPERANDS_INSTR,
- TWO_OPERANDS_INSTR,
- JUMP_CONDITIONAL_SHORT_INSTR,
- REGISTER_INSTR,
- PUSHPOP_INSTR, // Has implicit 64-bit operand size.
- MOVE_REG_INSTR,
- CALL_JUMP_INSTR,
- SHORT_IMMEDIATE_INSTR
-};
-
-
-enum Prefixes {
- ESCAPE_PREFIX = 0x0F,
- OPERAND_SIZE_OVERRIDE_PREFIX = 0x66,
- ADDRESS_SIZE_OVERRIDE_PREFIX = 0x67,
- REPNE_PREFIX = 0xF2,
- REP_PREFIX = 0xF3,
- REPEQ_PREFIX = REP_PREFIX
-};
-
-
-struct InstructionDesc {
- const char* mnem;
- InstructionType type;
- OperandType op_order_;
- bool byte_size_operation; // Fixed 8-bit operation.
-};
-
-
-class InstructionTable {
- public:
- InstructionTable();
- const InstructionDesc& Get(byte x) const {
- return instructions_[x];
- }
-
- private:
- InstructionDesc instructions_[256];
- void Clear();
- void Init();
- void CopyTable(const ByteMnemonic bm[], InstructionType type);
- void SetTableRange(InstructionType type, byte start, byte end, bool byte_size,
- const char* mnem);
- void AddJumpConditionalShort();
-};
-
-
-InstructionTable::InstructionTable() {
- Clear();
- Init();
-}
-
-
-void InstructionTable::Clear() {
- for (int i = 0; i < 256; i++) {
- instructions_[i].mnem = "(bad)";
- instructions_[i].type = NO_INSTR;
- instructions_[i].op_order_ = UNSET_OP_ORDER;
- instructions_[i].byte_size_operation = false;
- }
-}
-
-
-void InstructionTable::Init() {
- CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
- CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
- CopyTable(call_jump_instr, CALL_JUMP_INSTR);
- CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
- AddJumpConditionalShort();
- SetTableRange(PUSHPOP_INSTR, 0x50, 0x57, false, "push");
- SetTableRange(PUSHPOP_INSTR, 0x58, 0x5F, false, "pop");
- SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, false, "mov");
-}
-
-
-void InstructionTable::CopyTable(const ByteMnemonic bm[],
- InstructionType type) {
- for (int i = 0; bm[i].b >= 0; i++) {
- InstructionDesc* id = &instructions_[bm[i].b];
- id->mnem = bm[i].mnem;
- OperandType op_order = bm[i].op_order_;
- id->op_order_ =
- static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG);
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
- id->type = type;
- id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0);
- }
-}
-
-
-void InstructionTable::SetTableRange(InstructionType type,
- byte start,
- byte end,
- bool byte_size,
- const char* mnem) {
- for (byte b = start; b <= end; b++) {
- InstructionDesc* id = &instructions_[b];
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
- id->mnem = mnem;
- id->type = type;
- id->byte_size_operation = byte_size;
- }
-}
-
-
-void InstructionTable::AddJumpConditionalShort() {
- for (byte b = 0x70; b <= 0x7F; b++) {
- InstructionDesc* id = &instructions_[b];
- ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
- id->mnem = NULL; // Computed depending on condition code.
- id->type = JUMP_CONDITIONAL_SHORT_INSTR;
- }
-}
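// [Editor's annotation -- not part of the original file.]
// A minimal sketch of the table-driven decode set up above: one descriptor per
// opcode byte, looked up directly by the first byte of the instruction. The
// entries filled in here are just examples, not the full table.
#include <cstdio>

struct Desc {
  const char* mnem;
  bool byte_size;
};

int main() {
  Desc table[256];
  for (int i = 0; i < 256; i++) table[i] = {"(bad)", false};  // Clear() analogue
  table[0xC3] = {"ret", false};                               // Init() analogue
  table[0x88] = {"mov", true};
  unsigned char opcode = 0x88;
  std::printf("%s (byte-sized operand: %d)\n",
              table[opcode].mnem, static_cast<int>(table[opcode].byte_size));
  return 0;
}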
-
-
-static v8::internal::LazyInstance<InstructionTable>::type instruction_table =
- LAZY_INSTANCE_INITIALIZER;
-
-
-static InstructionDesc cmov_instructions[16] = {
- {"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovnc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovnz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovna", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmova", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovs", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovns", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovpe", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovpo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovl", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovge", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}
-};
-
-//------------------------------------------------------------------------------
-// DisassemblerX64 implementation.
-
-enum UnimplementedOpcodeAction {
- CONTINUE_ON_UNIMPLEMENTED_OPCODE,
- ABORT_ON_UNIMPLEMENTED_OPCODE
-};
-
-// A new DisassemblerX64 object is created for each instruction, since a
-// single object can only disassemble one instruction.
-class DisassemblerX64 {
- public:
- DisassemblerX64(const NameConverter& converter,
- UnimplementedOpcodeAction unimplemented_action =
- ABORT_ON_UNIMPLEMENTED_OPCODE)
- : converter_(converter),
- tmp_buffer_pos_(0),
- abort_on_unimplemented_(
- unimplemented_action == ABORT_ON_UNIMPLEMENTED_OPCODE),
- rex_(0),
- operand_size_(0),
- group_1_prefix_(0),
- byte_size_operand_(false),
- instruction_table_(instruction_table.Pointer()) {
- tmp_buffer_[0] = '\0';
- }
-
- virtual ~DisassemblerX64() {
- }
-
- // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes.
- int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
-
- private:
- enum OperandSize {
- BYTE_SIZE = 0,
- WORD_SIZE = 1,
- DOUBLEWORD_SIZE = 2,
- QUADWORD_SIZE = 3
- };
-
- const NameConverter& converter_;
- v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
- unsigned int tmp_buffer_pos_;
- bool abort_on_unimplemented_;
- // Prefixes parsed
- byte rex_;
- byte operand_size_; // 0x66 or (if no group 3 prefix is present) 0x0.
- byte group_1_prefix_; // 0xF2, 0xF3, or (if no group 1 prefix is present) 0.
- // Byte size operand override.
- bool byte_size_operand_;
- const InstructionTable* const instruction_table_;
-
- void setRex(byte rex) {
- ASSERT_EQ(0x40, rex & 0xF0);
- rex_ = rex;
- }
-
- bool rex() { return rex_ != 0; }
-
- bool rex_b() { return (rex_ & 0x01) != 0; }
-
- // Actual register number of the base register, given its low bits and the
- // rex.b state.
- int base_reg(int low_bits) { return low_bits | ((rex_ & 0x01) << 3); }
-
- bool rex_x() { return (rex_ & 0x02) != 0; }
-
- bool rex_r() { return (rex_ & 0x04) != 0; }
-
- bool rex_w() { return (rex_ & 0x08) != 0; }
-
- OperandSize operand_size() {
- if (byte_size_operand_) return BYTE_SIZE;
- if (rex_w()) return QUADWORD_SIZE;
- if (operand_size_ != 0) return WORD_SIZE;
- return DOUBLEWORD_SIZE;
- }
-
- char operand_size_code() {
- return "bwlq"[operand_size()];
- }
-
- const char* NameOfCPURegister(int reg) const {
- return converter_.NameOfCPURegister(reg);
- }
-
- const char* NameOfByteCPURegister(int reg) const {
- return converter_.NameOfByteCPURegister(reg);
- }
-
- const char* NameOfXMMRegister(int reg) const {
- return converter_.NameOfXMMRegister(reg);
- }
-
- const char* NameOfAddress(byte* addr) const {
- return converter_.NameOfAddress(addr);
- }
-
- // Disassembler helper functions.
- void get_modrm(byte data,
- int* mod,
- int* regop,
- int* rm) {
- *mod = (data >> 6) & 3;
- *regop = ((data & 0x38) >> 3) | (rex_r() ? 8 : 0);
- *rm = (data & 7) | (rex_b() ? 8 : 0);
- }
-
- void get_sib(byte data,
- int* scale,
- int* index,
- int* base) {
- *scale = (data >> 6) & 3;
- *index = ((data >> 3) & 7) | (rex_x() ? 8 : 0);
- *base = (data & 7) | (rex_b() ? 8 : 0);
- }
-
- typedef const char* (DisassemblerX64::*RegisterNameMapping)(int reg) const;
-
- int PrintRightOperandHelper(byte* modrmp,
- RegisterNameMapping register_name);
- int PrintRightOperand(byte* modrmp);
- int PrintRightByteOperand(byte* modrmp);
- int PrintRightXMMOperand(byte* modrmp);
- int PrintOperands(const char* mnem,
- OperandType op_order,
- byte* data);
- int PrintImmediate(byte* data, OperandSize size);
- int PrintImmediateOp(byte* data);
- const char* TwoByteMnemonic(byte opcode);
- int TwoByteOpcodeInstruction(byte* data);
- int F6F7Instruction(byte* data);
- int ShiftInstruction(byte* data);
- int JumpShort(byte* data);
- int JumpConditional(byte* data);
- int JumpConditionalShort(byte* data);
- int SetCC(byte* data);
- int FPUInstruction(byte* data);
- int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
- int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
- void AppendToBuffer(const char* format, ...);
-
- void UnimplementedInstruction() {
- if (abort_on_unimplemented_) {
- CHECK(false);
- } else {
- AppendToBuffer("'Unimplemented Instruction'");
- }
- }
-};
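// [Editor's annotation -- not part of the original file.]
// Worked example of the get_modrm()/get_sib() bit extraction in the class
// above: for ModRM byte 0xDC (mod=11, reg=011, rm=100) under REX prefix 0x41
// (REX.B set), the rm field is extended to 12, i.e. register r12.
#include <cstdio>

int main() {
  unsigned char modrm = 0xDC, rex = 0x41;  // hypothetical instruction bytes
  int mod = (modrm >> 6) & 3;
  int regop = ((modrm & 0x38) >> 3) | ((rex & 0x04) ? 8 : 0);  // REX.R extends reg
  int rm = (modrm & 7) | ((rex & 0x01) ? 8 : 0);               // REX.B extends rm
  std::printf("mod=%d regop=%d rm=%d\n", mod, regop, rm);      // mod=3 regop=3 rm=12
  return 0;
}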
-
-
-void DisassemblerX64::AppendToBuffer(const char* format, ...) {
- v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
- va_list args;
- va_start(args, format);
- int result = v8::internal::OS::VSNPrintF(buf, format, args);
- va_end(args);
- tmp_buffer_pos_ += result;
-}
-
-
-int DisassemblerX64::PrintRightOperandHelper(
- byte* modrmp,
- RegisterNameMapping direct_register_name) {
- int mod, regop, rm;
- get_modrm(*modrmp, &mod, &regop, &rm);
- RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
- &DisassemblerX64::NameOfCPURegister;
- switch (mod) {
- case 0:
- if ((rm & 7) == 5) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 1);
- AppendToBuffer("[0x%x]", disp);
- return 5;
- } else if ((rm & 7) == 4) {
- // Codes for SIB byte.
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
- // index == rsp means there is no index register. A SIB byte with no
- // index is only used when the base is rsp or r12.
- AppendToBuffer("[%s]", NameOfCPURegister(base));
- return 2;
- } else if (base == 5) {
- // base == rbp means no base register (when mod == 0).
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
- NameOfCPURegister(index),
- 1 << scale, disp);
- return 6;
- } else if (index != 4 && base != 5) {
- // [base+index*scale]
- AppendToBuffer("[%s+%s*%d]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale);
- return 2;
- } else {
- UnimplementedInstruction();
- return 1;
- }
- } else {
- AppendToBuffer("[%s]", NameOfCPURegister(rm));
- return 1;
- }
- break;
- case 1: // fall through
- case 2:
- if ((rm & 7) == 4) {
- byte sib = *(modrmp + 1);
- int scale, index, base;
- get_sib(sib, &scale, &index, &base);
- int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<char*>(modrmp + 2);
- if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(base), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(base), disp);
- }
- } else {
- if (-disp > 0) {
- AppendToBuffer("[%s+%s*%d-0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- -disp);
- } else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- disp);
- }
- }
- return mod == 2 ? 6 : 3;
- } else {
- // No sib.
- int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<char*>(modrmp + 1);
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(rm), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
- }
- return (mod == 2) ? 5 : 2;
- }
- break;
- case 3:
- AppendToBuffer("%s", (this->*register_name)(rm));
- return 1;
- default:
- UnimplementedInstruction();
- return 1;
- }
- UNREACHABLE();
-}
-
-
-int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
- int64_t value;
- int count;
- switch (size) {
- case BYTE_SIZE:
- value = *data;
- count = 1;
- break;
- case WORD_SIZE:
- value = *reinterpret_cast<int16_t*>(data);
- count = 2;
- break;
- case DOUBLEWORD_SIZE:
- value = *reinterpret_cast<uint32_t*>(data);
- count = 4;
- break;
- case QUADWORD_SIZE:
- value = *reinterpret_cast<int32_t*>(data);
- count = 4;
- break;
- default:
- UNREACHABLE();
- value = 0; // Initialize variables on all paths to satisfy the compiler.
- count = 0;
- }
- AppendToBuffer("%" V8_PTR_PREFIX "x", value);
- return count;
-}
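// [Editor's annotation -- not part of the original file.]
// Note that QUADWORD_SIZE immediates above are decoded from only 4 bytes: apart
// from the 64-bit move (REX.W B8+r), x64 instructions encode at most a 32-bit
// immediate, which the CPU sign-extends to 64 bits. Sketch of that extension:
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  unsigned char encoded[4] = {0xFE, 0xFF, 0xFF, 0xFF};  // -2 as a 32-bit immediate
  int32_t imm32;
  std::memcpy(&imm32, encoded, sizeof(imm32));
  int64_t value = imm32;  // sign-extended, matching what the CPU does at runtime
  std::printf("%lld\n", static_cast<long long>(value));
  return 0;
}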
-
-
-int DisassemblerX64::PrintRightOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX64::NameOfCPURegister);
-}
-
-
-int DisassemblerX64::PrintRightByteOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX64::NameOfByteCPURegister);
-}
-
-
-int DisassemblerX64::PrintRightXMMOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX64::NameOfXMMRegister);
-}
-
-
-// Returns number of bytes used including the current *data.
-// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
-int DisassemblerX64::PrintOperands(const char* mnem,
- OperandType op_order,
- byte* data) {
- byte modrm = *data;
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- int advance = 0;
- const char* register_name =
- byte_size_operand_ ? NameOfByteCPURegister(regop)
- : NameOfCPURegister(regop);
- switch (op_order) {
- case REG_OPER_OP_ORDER: {
- AppendToBuffer("%s%c %s,",
- mnem,
- operand_size_code(),
- register_name);
- advance = byte_size_operand_ ? PrintRightByteOperand(data)
- : PrintRightOperand(data);
- break;
- }
- case OPER_REG_OP_ORDER: {
- AppendToBuffer("%s%c ", mnem, operand_size_code());
- advance = byte_size_operand_ ? PrintRightByteOperand(data)
- : PrintRightOperand(data);
- AppendToBuffer(",%s", register_name);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- return advance;
-}
-
-
-// Returns number of bytes used by machine instruction, including *data byte.
-// Writes immediate instructions to 'tmp_buffer_'.
-int DisassemblerX64::PrintImmediateOp(byte* data) {
- bool byte_size_immediate = (*data & 0x02) != 0;
- byte modrm = *(data + 1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- const char* mnem = "Imm???";
- switch (regop) {
- case 0:
- mnem = "add";
- break;
- case 1:
- mnem = "or";
- break;
- case 2:
- mnem = "adc";
- break;
- case 3:
- mnem = "sbb";
- break;
- case 4:
- mnem = "and";
- break;
- case 5:
- mnem = "sub";
- break;
- case 6:
- mnem = "xor";
- break;
- case 7:
- mnem = "cmp";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s%c ", mnem, operand_size_code());
- int count = PrintRightOperand(data + 1);
- AppendToBuffer(",0x");
- OperandSize immediate_size = byte_size_immediate ? BYTE_SIZE : operand_size();
- count += PrintImmediate(data + 1 + count, immediate_size);
- return 1 + count;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::F6F7Instruction(byte* data) {
- ASSERT(*data == 0xF7 || *data == 0xF6);
- byte modrm = *(data + 1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- if (mod == 3 && regop != 0) {
- const char* mnem = NULL;
- switch (regop) {
- case 2:
- mnem = "not";
- break;
- case 3:
- mnem = "neg";
- break;
- case 4:
- mnem = "mul";
- break;
- case 5:
- mnem = "imul";
- break;
- case 7:
- mnem = "idiv";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s%c %s",
- mnem,
- operand_size_code(),
- NameOfCPURegister(rm));
- return 2;
- } else if (regop == 0) {
- AppendToBuffer("test%c ", operand_size_code());
- int count = PrintRightOperand(data + 1); // Use name of 64-bit register.
- AppendToBuffer(",0x");
- count += PrintImmediate(data + 1 + count, operand_size());
- return 1 + count;
- } else {
- UnimplementedInstruction();
- return 2;
- }
-}
-
-
-int DisassemblerX64::ShiftInstruction(byte* data) {
- byte op = *data & (~1);
- if (op != 0xD0 && op != 0xD2 && op != 0xC0) {
- UnimplementedInstruction();
- return 1;
- }
- byte modrm = *(data + 1);
- int mod, regop, rm;
- get_modrm(modrm, &mod, &regop, &rm);
- regop &= 0x7; // The REX.R bit does not affect the operation.
- int imm8 = -1;
- int num_bytes = 2;
- if (mod != 3) {
- UnimplementedInstruction();
- return num_bytes;
- }
- const char* mnem = NULL;
- switch (regop) {
- case 0:
- mnem = "rol";
- break;
- case 1:
- mnem = "ror";
- break;
- case 2:
- mnem = "rcl";
- break;
- case 3:
- mnem = "rcr";
- break;
- case 4:
- mnem = "shl";
- break;
- case 5:
- mnem = "shr";
- break;
- case 7:
- mnem = "sar";
- break;
- default:
- UnimplementedInstruction();
- return num_bytes;
- }
- ASSERT_NE(NULL, mnem);
- if (op == 0xD0) {
- imm8 = 1;
- } else if (op == 0xC0) {
- imm8 = *(data + 2);
- num_bytes = 3;
- }
- AppendToBuffer("%s%c %s,",
- mnem,
- operand_size_code(),
- byte_size_operand_ ? NameOfByteCPURegister(rm)
- : NameOfCPURegister(rm));
- if (op == 0xD2) {
- AppendToBuffer("cl");
- } else {
- AppendToBuffer("%d", imm8);
- }
- return num_bytes;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::JumpShort(byte* data) {
- ASSERT_EQ(0xEB, *data);
- byte b = *(data + 1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- AppendToBuffer("jmp %s", NameOfAddress(dest));
- return 2;
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::JumpConditional(byte* data) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data + 1) & 0x0F;
- byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
- const char* mnem = conditional_code_suffix[cond];
- AppendToBuffer("j%s %s", mnem, NameOfAddress(dest));
- return 6; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::JumpConditionalShort(byte* data) {
- byte cond = *data & 0x0F;
- byte b = *(data + 1);
- byte* dest = data + static_cast<int8_t>(b) + 2;
- const char* mnem = conditional_code_suffix[cond];
- AppendToBuffer("j%s %s", mnem, NameOfAddress(dest));
- return 2;
-}
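// [Editor's annotation -- not part of the original file.]
// The short conditional jumps handled above are 2 bytes long: opcode 0x70|cond
// plus a signed 8-bit displacement measured from the end of the instruction,
// which is why the target is "data + int8 + 2". Worked example:
#include <cstdint>
#include <cstdio>

int main() {
  uint8_t code[2] = {0x74, 0xFC};  // hypothetical "jz" with displacement -4
  uint8_t* instr = code;
  int8_t disp = static_cast<int8_t>(instr[1]);
  uint8_t* dest = instr + disp + 2;  // same arithmetic as JumpConditionalShort()
  std::printf("target is %td bytes from the start of the jump\n", dest - instr);
  return 0;
}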
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::SetCC(byte* data) {
- ASSERT_EQ(0x0F, *data);
- byte cond = *(data + 1) & 0x0F;
- const char* mnem = conditional_code_suffix[cond];
- AppendToBuffer("set%s%c ", mnem, operand_size_code());
- PrintRightByteOperand(data + 2);
- return 3; // includes 0x0F
-}
-
-
-// Returns number of bytes used, including *data.
-int DisassemblerX64::FPUInstruction(byte* data) {
- byte escape_opcode = *data;
- ASSERT_EQ(0xD8, escape_opcode & 0xF8);
- byte modrm_byte = *(data+1);
-
- if (modrm_byte >= 0xC0) {
- return RegisterFPUInstruction(escape_opcode, modrm_byte);
- } else {
- return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
- }
-}
-
-int DisassemblerX64::MemoryFPUInstruction(int escape_opcode,
- int modrm_byte,
- byte* modrm_start) {
- const char* mnem = "?";
- int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
- switch (escape_opcode) {
- case 0xD9: switch (regop) {
- case 0: mnem = "fld_s"; break;
- case 3: mnem = "fstp_s"; break;
- case 7: mnem = "fstcw"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDB: switch (regop) {
- case 0: mnem = "fild_s"; break;
- case 1: mnem = "fisttp_s"; break;
- case 2: mnem = "fist_s"; break;
- case 3: mnem = "fistp_s"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD: switch (regop) {
- case 0: mnem = "fld_d"; break;
- case 3: mnem = "fstp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDF: switch (regop) {
- case 5: mnem = "fild_d"; break;
- case 7: mnem = "fistp_d"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(modrm_start);
- return count + 1;
-}
-
-int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
- byte modrm_byte) {
- bool has_register = false; // Is the FPU register encoded in modrm_byte?
- const char* mnem = "?";
-
- switch (escape_opcode) {
- case 0xD8:
- UnimplementedInstruction();
- break;
-
- case 0xD9:
- switch (modrm_byte & 0xF8) {
- case 0xC0:
- mnem = "fld";
- has_register = true;
- break;
- case 0xC8:
- mnem = "fxch";
- has_register = true;
- break;
- default:
- switch (modrm_byte) {
- case 0xE0: mnem = "fchs"; break;
- case 0xE1: mnem = "fabs"; break;
- case 0xE3: mnem = "fninit"; break;
- case 0xE4: mnem = "ftst"; break;
- case 0xE8: mnem = "fld1"; break;
- case 0xEB: mnem = "fldpi"; break;
- case 0xED: mnem = "fldln2"; break;
- case 0xEE: mnem = "fldz"; break;
- case 0xF0: mnem = "f2xm1"; break;
- case 0xF1: mnem = "fyl2x"; break;
- case 0xF2: mnem = "fptan"; break;
- case 0xF5: mnem = "fprem1"; break;
- case 0xF7: mnem = "fincstp"; break;
- case 0xF8: mnem = "fprem"; break;
- case 0xFD: mnem = "fscale"; break;
- case 0xFE: mnem = "fsin"; break;
- case 0xFF: mnem = "fcos"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDA:
- if (modrm_byte == 0xE9) {
- mnem = "fucompp";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDB:
- if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomi";
- has_register = true;
- } else if (modrm_byte == 0xE2) {
- mnem = "fclex";
- } else {
- UnimplementedInstruction();
- }
- break;
-
- case 0xDC:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "fadd"; break;
- case 0xE8: mnem = "fsub"; break;
- case 0xC8: mnem = "fmul"; break;
- case 0xF8: mnem = "fdiv"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDD:
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "ffree"; break;
- case 0xD8: mnem = "fstp"; break;
- default: UnimplementedInstruction();
- }
- break;
-
- case 0xDE:
- if (modrm_byte == 0xD9) {
- mnem = "fcompp";
- } else {
- has_register = true;
- switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "faddp"; break;
- case 0xE8: mnem = "fsubp"; break;
- case 0xC8: mnem = "fmulp"; break;
- case 0xF8: mnem = "fdivp"; break;
- default: UnimplementedInstruction();
- }
- }
- break;
-
- case 0xDF:
- if (modrm_byte == 0xE0) {
- mnem = "fnstsw_ax";
- } else if ((modrm_byte & 0xF8) == 0xE8) {
- mnem = "fucomip";
- has_register = true;
- }
- break;
-
- default: UnimplementedInstruction();
- }
-
- if (has_register) {
- AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
- } else {
- AppendToBuffer("%s", mnem);
- }
- return 2;
-}
-
-
-
-// Handle all two-byte opcodes, which start with 0x0F.
-// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
- // Three-byte opcodes (0x0F 0x3A) are handled only for extractps and roundsd.
-int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
- byte opcode = *(data + 1);
- byte* current = data + 2;
- // At return, "current" points to the start of the next instruction.
- const char* mnemonic = TwoByteMnemonic(opcode);
- if (operand_size_ == 0x66) {
- // 0x66 0x0F prefix.
- int mod, regop, rm;
- if (opcode == 0x3A) {
- byte third_byte = *current;
- current = data + 3;
- if (third_byte == 0x17) {
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("extractps "); // reg/m32, xmm, imm8
- current += PrintRightOperand(current);
- AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
- current += 1;
- } else if (third_byte == 0x0b) {
- get_modrm(*current, &mod, &regop, &rm);
- // roundsd xmm, xmm/m64, imm8
- AppendToBuffer("roundsd %s, ", NameOfCPURegister(regop));
- current += PrintRightOperand(current);
- AppendToBuffer(", %d", (*current) & 3);
- current += 1;
- } else {
- UnimplementedInstruction();
- }
- } else {
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x1f) {
- current++;
- if (rm == 4) { // SIB byte present.
- current++;
- }
- if (mod == 1) { // Byte displacement.
- current += 1;
- } else if (mod == 2) { // 32-bit displacement.
- current += 4;
- } // else no immediate displacement.
- AppendToBuffer("nop");
- } else if (opcode == 0x28) {
- AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x29) {
- AppendToBuffer("movapd ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else if (opcode == 0x6E) {
- AppendToBuffer("mov%c %s,",
- rex_w() ? 'q' : 'd',
- NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else if (opcode == 0x6F) {
- AppendToBuffer("movdqa %s,",
- NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x7E) {
- AppendToBuffer("mov%c ",
- rex_w() ? 'q' : 'd');
- current += PrintRightOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else if (opcode == 0x7F) {
- AppendToBuffer("movdqa ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else if (opcode == 0xD6) {
- AppendToBuffer("movq ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
- } else if (opcode == 0x50) {
- AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else {
- const char* mnemonic = "?";
- if (opcode == 0x54) {
- mnemonic = "andpd";
- } else if (opcode == 0x56) {
- mnemonic = "orpd";
- } else if (opcode == 0x57) {
- mnemonic = "xorpd";
- } else if (opcode == 0x2E) {
- mnemonic = "ucomisd";
- } else if (opcode == 0x2F) {
- mnemonic = "comisd";
- } else {
- UnimplementedInstruction();
- }
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- }
- }
- } else if (group_1_prefix_ == 0xF2) {
- // Beginning of instructions with prefix 0xF2.
-
- if (opcode == 0x11 || opcode == 0x10) {
- // MOVSD: Move scalar double-precision fp to/from/between XMM registers.
- AppendToBuffer("movsd ");
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x11) {
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- }
- } else if (opcode == 0x2A) {
- // CVTSI2SD: integer to XMM double conversion.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%sd %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else if (opcode == 0x2C) {
- // CVTTSD2SI:
- // Convert with truncation scalar double-precision FP to integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvttsd2si%c %s,",
- operand_size_code(), NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x2D) {
- // CVTSD2SI: Convert scalar double-precision FP to integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvtsd2si%c %s,",
- operand_size_code(), NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
- // XMM arithmetic. Mnemonic was retrieved at the start of this function.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else {
- UnimplementedInstruction();
- }
- } else if (group_1_prefix_ == 0xF3) {
- // Instructions with prefix 0xF3.
- if (opcode == 0x11 || opcode == 0x10) {
- // MOVSS: Move scalar single-precision fp to/from/between XMM registers.
- AppendToBuffer("movss ");
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x11) {
- current += PrintRightOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- }
- } else if (opcode == 0x2A) {
- // CVTSI2SS: integer to XMM single conversion.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%ss %s,", mnemonic, NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
- } else if (opcode == 0x2C) {
- // CVTTSS2SI:
- // Convert with truncation scalar single-precision FP to dword integer.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvttss2si%c %s,",
- operand_size_code(), NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x5A) {
- // CVTSS2SD:
- // Convert scalar single-precision FP to scalar double-precision FP.
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (opcode == 0x7E) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movq %s, ", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else {
- UnimplementedInstruction();
- }
- } else if (opcode == 0x1F) {
- // NOP
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- current++;
- if (rm == 4) { // SIB byte present.
- current++;
- }
- if (mod == 1) { // Byte displacement.
- current += 1;
- } else if (mod == 2) { // 32-bit displacement.
- current += 4;
- } // else no immediate displacement.
- AppendToBuffer("nop");
-
- } else if (opcode == 0x28) {
- // movaps xmm, xmm/m128
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movaps %s, ", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
-
- } else if (opcode == 0x29) {
- // movaps xmm/m128, xmm
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movaps ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(", %s", NameOfXMMRegister(regop));
-
- } else if (opcode == 0xA2 || opcode == 0x31) {
- // RDTSC or CPUID
- AppendToBuffer("%s", mnemonic);
-
- } else if ((opcode & 0xF0) == 0x40) {
- // CMOVcc: conditional move.
- int condition = opcode & 0x0F;
- const InstructionDesc& idesc = cmov_instructions[condition];
- byte_size_operand_ = idesc.byte_size_operation;
- current += PrintOperands(idesc.mnem, idesc.op_order_, current);
-
- } else if (opcode == 0x57) {
- // xorps xmm, xmm/m128
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
-
- } else if (opcode == 0x50) {
- // movmskps reg, xmm
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movmskps %s, ", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
-
- } else if ((opcode & 0xF0) == 0x80) {
- // Jcc: Conditional jump (branch).
- current = data + JumpConditional(data);
-
- } else if (opcode == 0xBE || opcode == 0xBF || opcode == 0xB6 ||
- opcode == 0xB7 || opcode == 0xAF) {
- // Size-extending moves, IMUL.
- current += PrintOperands(mnemonic, REG_OPER_OP_ORDER, current);
-
- } else if ((opcode & 0xF0) == 0x90) {
- // SETcc: Set byte on condition. Needs pointer to beginning of instruction.
- current = data + SetCC(data);
-
- } else if (opcode == 0xAB || opcode == 0xA5 || opcode == 0xAD) {
- // SHLD, SHRD (double-precision shift), BTS (bit set).
- AppendToBuffer("%s ", mnemonic);
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- current += PrintRightOperand(current);
- if (opcode == 0xAB) {
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else {
- AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
- }
- } else {
- UnimplementedInstruction();
- }
- return static_cast<int>(current - data);
-}
-
-
-// Mnemonics for two-byte opcode instructions starting with 0x0F.
-// The argument is the second byte of the two-byte opcode.
-// Returns NULL if the instruction is not handled here.
-const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
- switch (opcode) {
- case 0x1F:
- return "nop";
- case 0x2A: // F2/F3 prefix.
- return "cvtsi2s";
- case 0x31:
- return "rdtsc";
- case 0x51: // F2 prefix.
- return "sqrtsd";
- case 0x58: // F2 prefix.
- return "addsd";
- case 0x59: // F2 prefix.
- return "mulsd";
- case 0x5C: // F2 prefix.
- return "subsd";
- case 0x5E: // F2 prefix.
- return "divsd";
- case 0xA2:
- return "cpuid";
- case 0xA5:
- return "shld";
- case 0xAB:
- return "bts";
- case 0xAD:
- return "shrd";
- case 0xAF:
- return "imul";
- case 0xB6:
- return "movzxb";
- case 0xB7:
- return "movzxw";
- case 0xBE:
- return "movsxb";
- case 0xBF:
- return "movsxw";
- default:
- return NULL;
- }
-}
-
-
-// Disassembles the instruction at instr, and writes it into out_buffer.
-int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
- byte* instr) {
- tmp_buffer_pos_ = 0; // Start writing at position 0.
- byte* data = instr;
- bool processed = true; // Will be set to false if the current instruction
- // is not in the 'instructions' table.
- byte current;
-
- // Scan for prefixes.
- while (true) {
- current = *data;
- if (current == OPERAND_SIZE_OVERRIDE_PREFIX) { // Group 3 prefix.
- operand_size_ = current;
- } else if ((current & 0xF0) == 0x40) { // REX prefix.
- setRex(current);
- if (rex_w()) AppendToBuffer("REX.W ");
- } else if ((current & 0xFE) == 0xF2) { // Group 1 prefix (0xF2 or 0xF3).
- group_1_prefix_ = current;
- } else { // Not a prefix - an opcode.
- break;
- }
- data++;
- }
-
- const InstructionDesc& idesc = instruction_table_->Get(current);
- byte_size_operand_ = idesc.byte_size_operation;
- switch (idesc.type) {
- case ZERO_OPERANDS_INSTR:
- if (current >= 0xA4 && current <= 0xA7) {
- // String move or compare operations.
- if (group_1_prefix_ == REP_PREFIX) {
- // REP.
- AppendToBuffer("rep ");
- }
- if (rex_w()) AppendToBuffer("REX.W ");
- AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
- } else {
- AppendToBuffer("%s", idesc.mnem, operand_size_code());
- }
- data++;
- break;
-
- case TWO_OPERANDS_INSTR:
- data++;
- data += PrintOperands(idesc.mnem, idesc.op_order_, data);
- break;
-
- case JUMP_CONDITIONAL_SHORT_INSTR:
- data += JumpConditionalShort(data);
- break;
-
- case REGISTER_INSTR:
- AppendToBuffer("%s%c %s",
- idesc.mnem,
- operand_size_code(),
- NameOfCPURegister(base_reg(current & 0x07)));
- data++;
- break;
- case PUSHPOP_INSTR:
- AppendToBuffer("%s %s",
- idesc.mnem,
- NameOfCPURegister(base_reg(current & 0x07)));
- data++;
- break;
- case MOVE_REG_INSTR: {
- byte* addr = NULL;
- switch (operand_size()) {
- case WORD_SIZE:
- addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
- data += 3;
- break;
- case DOUBLEWORD_SIZE:
- addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
- data += 5;
- break;
- case QUADWORD_SIZE:
- addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
- data += 9;
- break;
- default:
- UNREACHABLE();
- }
- AppendToBuffer("mov%c %s,%s",
- operand_size_code(),
- NameOfCPURegister(base_reg(current & 0x07)),
- NameOfAddress(addr));
- break;
- }
-
- case CALL_JUMP_INSTR: {
- byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
- AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case SHORT_IMMEDIATE_INSTR: {
- byte* addr =
- reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
- AppendToBuffer("%s rax, %s", idesc.mnem, NameOfAddress(addr));
- data += 5;
- break;
- }
-
- case NO_INSTR:
- processed = false;
- break;
-
- default:
- UNIMPLEMENTED(); // This type is not implemented.
- }
-
- // The first byte didn't match any of the simple opcodes, so we
- // need to do special processing on it.
- if (!processed) {
- switch (*data) {
- case 0xC2:
- AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data + 1));
- data += 3;
- break;
-
- case 0x69: // fall through
- case 0x6B: {
- int mod, regop, rm;
- get_modrm(*(data + 1), &mod, &regop, &rm);
- int32_t imm = *data == 0x6B ? *(data + 2)
- : *reinterpret_cast<int32_t*>(data + 2);
- AppendToBuffer("imul%c %s,%s,0x%x",
- operand_size_code(),
- NameOfCPURegister(regop),
- NameOfCPURegister(rm), imm);
- data += 2 + (*data == 0x6B ? 1 : 4);
- break;
- }
-
- case 0x81: // fall through
- case 0x83: // 0x81 with sign extension bit set
- data += PrintImmediateOp(data);
- break;
-
- case 0x0F:
- data += TwoByteOpcodeInstruction(data);
- break;
-
- case 0x8F: {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == 0) {
- AppendToBuffer("pop ");
- data += PrintRightOperand(data);
- }
- }
- break;
-
- case 0xFF: {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = NULL;
- switch (regop) {
- case 0:
- mnem = "inc";
- break;
- case 1:
- mnem = "dec";
- break;
- case 2:
- mnem = "call";
- break;
- case 4:
- mnem = "jmp";
- break;
- case 6:
- mnem = "push";
- break;
- default:
- mnem = "???";
- }
- AppendToBuffer(((regop <= 1) ? "%s%c " : "%s "),
- mnem,
- operand_size_code());
- data += PrintRightOperand(data);
- }
- break;
-
- case 0xC7: // imm32, fall through
- case 0xC6: // imm8
- {
- bool is_byte = *data == 0xC6;
- data++;
- if (is_byte) {
- AppendToBuffer("movb ");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- AppendToBuffer("mov%c ", operand_size_code());
- data += PrintRightOperand(data);
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 4;
- }
- }
- break;
-
- case 0x80: {
- data++;
- AppendToBuffer("cmpb ");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- }
- break;
-
- case 0x88: // 8bit, fall through
- case 0x89: // 32bit
- {
- bool is_byte = *data == 0x88;
- int mod, regop, rm;
- data++;
- get_modrm(*data, &mod, &regop, &rm);
- if (is_byte) {
- AppendToBuffer("movb ");
- data += PrintRightByteOperand(data);
- AppendToBuffer(",%s", NameOfByteCPURegister(regop));
- } else {
- AppendToBuffer("mov%c ", operand_size_code());
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- }
- }
- break;
-
- case 0x90:
- case 0x91:
- case 0x92:
- case 0x93:
- case 0x94:
- case 0x95:
- case 0x96:
- case 0x97: {
- int reg = (*data & 0x7) | (rex_b() ? 8 : 0);
- if (reg == 0) {
- AppendToBuffer("nop"); // Common name for xchg rax,rax.
- } else {
- AppendToBuffer("xchg%c rax, %s",
- operand_size_code(),
- NameOfCPURegister(reg));
- }
- data++;
- }
- break;
- case 0xB0:
- case 0xB1:
- case 0xB2:
- case 0xB3:
- case 0xB4:
- case 0xB5:
- case 0xB6:
- case 0xB7:
- case 0xB8:
- case 0xB9:
- case 0xBA:
- case 0xBB:
- case 0xBC:
- case 0xBD:
- case 0xBE:
- case 0xBF: {
- // mov reg8,imm8 or mov reg32,imm32
- byte opcode = *data;
- data++;
- bool is_32bit = (opcode >= 0xB8);
- int reg = (opcode & 0x7) | (rex_b() ? 8 : 0);
- if (is_32bit) {
- AppendToBuffer("mov%c %s, ",
- operand_size_code(),
- NameOfCPURegister(reg));
- data += PrintImmediate(data, DOUBLEWORD_SIZE);
- } else {
- AppendToBuffer("movb %s, ",
- NameOfByteCPURegister(reg));
- data += PrintImmediate(data, BYTE_SIZE);
- }
- break;
- }
- case 0xFE: {
- data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == 1) {
- AppendToBuffer("decb ");
- data += PrintRightByteOperand(data);
- } else {
- UnimplementedInstruction();
- }
- break;
- }
- case 0x68:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
- data += 5;
- break;
-
- case 0x6A:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
- data += 2;
- break;
-
- case 0xA1: // Fall through.
- case 0xA3:
- switch (operand_size()) {
- case DOUBLEWORD_SIZE: {
- const char* memory_location = NameOfAddress(
- reinterpret_cast<byte*>(
- *reinterpret_cast<int32_t*>(data + 1)));
- if (*data == 0xA1) { // Opcode 0xA1
- AppendToBuffer("movzxlq rax,(%s)", memory_location);
- } else { // Opcode 0xA3
- AppendToBuffer("movzxlq (%s),rax", memory_location);
- }
- data += 5;
- break;
- }
- case QUADWORD_SIZE: {
- // New x64 instruction mov rax,(imm_64).
- const char* memory_location = NameOfAddress(
- *reinterpret_cast<byte**>(data + 1));
- if (*data == 0xA1) { // Opcode 0xA1
- AppendToBuffer("movq rax,(%s)", memory_location);
- } else { // Opcode 0xA3
- AppendToBuffer("movq (%s),rax", memory_location);
- }
- data += 9;
- break;
- }
- default:
- UnimplementedInstruction();
- data += 2;
- }
- break;
-
- case 0xA8:
- AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
- data += 2;
- break;
-
- case 0xA9: {
- int64_t value = 0;
- switch (operand_size()) {
- case WORD_SIZE:
- value = *reinterpret_cast<uint16_t*>(data + 1);
- data += 3;
- break;
- case DOUBLEWORD_SIZE:
- value = *reinterpret_cast<uint32_t*>(data + 1);
- data += 5;
- break;
- case QUADWORD_SIZE:
- value = *reinterpret_cast<int32_t*>(data + 1);
- data += 5;
- break;
- default:
- UNREACHABLE();
- }
- AppendToBuffer("test%c rax,0x%" V8_PTR_PREFIX "x",
- operand_size_code(),
- value);
- break;
- }
- case 0xD1: // fall through
- case 0xD3: // fall through
- case 0xC1:
- data += ShiftInstruction(data);
- break;
- case 0xD0: // fall through
- case 0xD2: // fall through
- case 0xC0:
- byte_size_operand_ = true;
- data += ShiftInstruction(data);
- break;
-
- case 0xD9: // fall through
- case 0xDA: // fall through
- case 0xDB: // fall through
- case 0xDC: // fall through
- case 0xDD: // fall through
- case 0xDE: // fall through
- case 0xDF:
- data += FPUInstruction(data);
- break;
-
- case 0xEB:
- data += JumpShort(data);
- break;
-
- case 0xF6:
- byte_size_operand_ = true; // fall through
- case 0xF7:
- data += F6F7Instruction(data);
- break;
-
- case 0x3C:
- AppendToBuffer("cmp al, 0x%x", *reinterpret_cast<int8_t*>(data + 1));
- data += 2;
- break;
-
- default:
- UnimplementedInstruction();
- data += 1;
- }
- } // !processed
-
- if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
- tmp_buffer_[tmp_buffer_pos_] = '\0';
- }
-
- int instr_len = static_cast<int>(data - instr);
- ASSERT(instr_len > 0); // Ensure progress.
-
- int outp = 0;
- // Instruction bytes.
- for (byte* bp = instr; bp < data; bp++) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, "%02x", *bp);
- }
- for (int i = 6 - instr_len; i >= 0; i--) {
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, " ");
- }
-
- outp += v8::internal::OS::SNPrintF(out_buffer + outp, " %s",
- tmp_buffer_.start());
- return instr_len;
-}
-
-//------------------------------------------------------------------------------
-
-
-static const char* cpu_regs[16] = {
- "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
-};
-
-
-static const char* byte_cpu_regs[16] = {
- "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
- "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l"
-};
-
-
-static const char* xmm_regs[16] = {
- "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
- "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
-};
-
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
- return tmp_buffer_.start();
-}
-
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
- return NameOfAddress(addr);
-}
-
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
- if (0 <= reg && reg < 16)
- return cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
- if (0 <= reg && reg < 16)
- return byte_cpu_regs[reg];
- return "noreg";
-}
-
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
- if (0 <= reg && reg < 16)
- return xmm_regs[reg];
- return "noxmmreg";
-}
-
-
-const char* NameConverter::NameInCode(byte* addr) const {
- // X64 does not embed debug strings at the moment.
- UNREACHABLE();
- return "";
-}
-
-//------------------------------------------------------------------------------
-
-Disassembler::Disassembler(const NameConverter& converter)
- : converter_(converter) { }
-
-Disassembler::~Disassembler() { }
-
-
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- byte* instruction) {
- DisassemblerX64 d(converter_, CONTINUE_ON_UNIMPLEMENTED_OPCODE);
- return d.InstructionDecode(buffer, instruction);
-}
-
-
-// The X64 assembler does not use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte* instruction) {
- return -1;
-}
-
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
- NameConverter converter;
- Disassembler d(converter);
- for (byte* pc = begin; pc < end;) {
- v8::internal::EmbeddedVector<char, 128> buffer;
- buffer[0] = '\0';
- byte* prev_pc = pc;
- pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p", prev_pc);
- fprintf(f, " ");
-
- for (byte* bp = prev_pc; bp < pc; bp++) {
- fprintf(f, "%02x", *bp);
- }
- for (int i = 6 - static_cast<int>(pc - prev_pc); i >= 0; i--) {
- fprintf(f, " ");
- }
- fprintf(f, " %s\n", buffer.start());
- }
-}
-
-} // namespace disasm
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/frames-x64.cc b/src/3rdparty/v8/src/x64/frames-x64.cc
deleted file mode 100644
index 6c58bc9..0000000
--- a/src/3rdparty/v8/src/x64/frames-x64.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address ExitFrame::ComputeStackPointer(Address fp) {
- return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/frames-x64.h b/src/3rdparty/v8/src/x64/frames-x64.h
deleted file mode 100644
index c9092af..0000000
--- a/src/3rdparty/v8/src/x64/frames-x64.h
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_FRAMES_X64_H_
-#define V8_X64_FRAMES_X64_H_
-
-namespace v8 {
-namespace internal {
-
-const int kNumRegs = 16;
-const RegList kJSCallerSaved =
- 1 << 0 | // rax
- 1 << 1 | // rcx
- 1 << 2 | // rdx
- 1 << 3 | // rbx - used as a caller-saved register in JavaScript code
- 1 << 7; // rdi - callee function
-
-const int kNumJSCallerSaved = 5;
-
-typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
-
-// Number of registers for which space is reserved in safepoints.
-const int kNumSafepointRegisters = 16;
-
-// ----------------------------------------------------
-
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
-class EntryFrameConstants : public AllStatic {
- public:
-#ifdef _WIN64
- static const int kCallerFPOffset = -10 * kPointerSize;
-#else
- static const int kCallerFPOffset = -8 * kPointerSize;
-#endif
- static const int kArgvOffset = 6 * kPointerSize;
-};
-
-
-class ExitFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = -2 * kPointerSize;
- static const int kSPOffset = -1 * kPointerSize;
-
- static const int kCallerFPOffset = +0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
-
- // FP-relative displacement of the caller's SP. It points just
- // below the saved PC.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
-};
-
-
-class JavaScriptFrameConstants : public AllStatic {
- public:
- // FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
-
- // Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
-};
-
-
-class ArgumentsAdaptorFrameConstants : public AllStatic {
- public:
- static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
- static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
-};
-
-
-class InternalFrameConstants : public AllStatic {
- public:
- static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
-};
-
-
-inline Object* JavaScriptFrame::function_slot_object() const {
- const int offset = JavaScriptFrameConstants::kFunctionOffset;
- return Memory::Object_at(fp() + offset);
-}
-
-} } // namespace v8::internal
-
-#endif // V8_X64_FRAMES_X64_H_
diff --git a/src/3rdparty/v8/src/x64/full-codegen-x64.cc b/src/3rdparty/v8/src/x64/full-codegen-x64.cc
deleted file mode 100644
index e236ce1..0000000
--- a/src/3rdparty/v8/src/x64/full-codegen-x64.cc
+++ /dev/null
@@ -1,4594 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "code-stubs.h"
-#include "codegen.h"
-#include "compiler.h"
-#include "debug.h"
-#include "full-codegen.h"
-#include "isolate-inl.h"
-#include "parser.h"
-#include "scopes.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-
-class JumpPatchSite BASE_EMBEDDED {
- public:
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
-#ifdef DEBUG
- info_emitted_ = false;
-#endif
- }
-
- ~JumpPatchSite() {
- ASSERT(patch_site_.is_bound() == info_emitted_);
- }
-
- void EmitJumpIfNotSmi(Register reg,
- Label* target,
- Label::Distance near_jump = Label::kFar) {
- __ testb(reg, Immediate(kSmiTagMask));
- EmitJump(not_carry, target, near_jump); // Always taken before patched.
- }
-
- void EmitJumpIfSmi(Register reg,
- Label* target,
- Label::Distance near_jump = Label::kFar) {
- __ testb(reg, Immediate(kSmiTagMask));
- EmitJump(carry, target, near_jump); // Never taken before patched.
- }
-
- void EmitPatchInfo() {
- if (patch_site_.is_bound()) {
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
- __ testl(rax, Immediate(delta_to_patch_site));
-#ifdef DEBUG
- info_emitted_ = true;
-#endif
- } else {
- __ nop(); // Signals no inlined code.
- }
- }
-
- private:
- // jc will be patched with jz, jnc will become jnz.
- void EmitJump(Condition cc, Label* target, Label::Distance near_jump) {
- ASSERT(!patch_site_.is_bound() && !info_emitted_);
- ASSERT(cc == carry || cc == not_carry);
- __ bind(&patch_site_);
- __ j(cc, target, near_jump);
- }
-
- MacroAssembler* masm_;
- Label patch_site_;
-#ifdef DEBUG
- bool info_emitted_;
-#endif
-};
-
-
-// Generate code for a JS function. On entry to the function the receiver
-// and arguments have been pushed on the stack left to right, with the
-// return address on top of them. The actual argument count matches the
-// formal parameter count expected by the function.
-//
-// The live registers are:
-// o rdi: the JS function object being called (i.e. ourselves)
-// o rsi: our context
-// o rbp: our caller's frame pointer
-// o rsp: stack pointer (pointing to return address)
-//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-x64.h for its layout.
-void FullCodeGenerator::Generate() {
- CompilationInfo* info = info_;
- handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
- profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
- Comment cmnt(masm_, "[ function compiled by full code generator");
-
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). rcx is zero for method calls and non-zero for
- // function calls.
- if (!info->is_classic_mode() || info->is_native()) {
- Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
- __ bind(&ok);
- }
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done below).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- info->set_prologue_offset(masm_->pc_offset());
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
-
- { Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = info->scope()->num_stack_slots();
- if (locals_count == 1) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- } else if (locals_count > 1) {
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(rdx);
- }
- }
- }
-
- bool function_in_register = true;
-
- // Possibly allocate a local context.
- int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment cmnt(masm_, "[ Allocate context");
- // Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
- if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
- __ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
- } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- function_in_register = false;
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved on the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
-
- // Copy any necessary parameters into the context.
- int num_parameters = info->scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(var->index());
- __ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers rax and rbx.
- __ RecordWriteContextSlot(
- rsi, context_offset, rax, rbx, kDontSaveFPRegs);
- }
- }
- }
-
- // Possibly allocate an arguments object.
- Variable* arguments = scope()->arguments();
- if (arguments != NULL) {
- // Arguments object must be allocated after the context object, in
- // case the "arguments" or ".arguments" variables are in the context.
- Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(rdi);
- } else {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
- // The receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ lea(rdx,
- Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(rdx);
- __ Push(Smi::FromInt(num_parameters));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
- }
- ArgumentsAccessStub stub(type);
- __ CallStub(&stub);
-
- SetVar(arguments, rax, rbx, rdx);
- }
-
- if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- // Visit the declarations and body unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ Declarations");
- scope()->VisitIllegalRedeclaration(this);
-
- } else {
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- VariableDeclaration* function = scope()->function();
- ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
- ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
- VisitVariableDeclaration(function);
- }
- VisitDeclarations(scope()->declarations());
- }
-
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
- }
-
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
- }
-
- // Always emit a 'return undefined' in case control fell off the end of
- // the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- EmitReturnSequence();
- }
-}
-
-
-void FullCodeGenerator::ClearAccumulator() {
- __ Set(rax, 0);
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
- __ SmiAddConstant(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- Smi::FromInt(-delta));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
- int reset_value = FLAG_interrupt_budget;
- if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
- // Self-optimization is a one-off thing; if it fails, don't try again.
- reset_value = Smi::kMaxValue;
- }
- __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
- RelocInfo::NONE64);
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- kScratchRegister);
-}
-
-
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Back edge bookkeeping");
- Label ok;
-
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
-
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
-
- // Loop stack checks can be patched to perform on-stack replacement. In
- // order to decide whether or not to perform OSR we embed the loop depth
- // in a test instruction after the call so we can extract it from the OSR
- // builtin.
- ASSERT(loop_depth() > 0);
- __ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
-
- EmitProfilingCounterReset();
-
- __ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
- // Record a mapping of the OSR id to this PC. This is used if the OSR
- // entry becomes the target of a bailout. We don't expect it to be, but
- // we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::EmitReturnSequence() {
- Comment cmnt(masm_, "[ Return sequence");
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- } else {
- __ bind(&return_label_);
- if (FLAG_trace) {
- __ push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else if (FLAG_weighted_back_edges) {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- __ push(rax);
- if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
- } else {
- InterruptStub stub;
- __ CallStub(&stub);
- }
- __ pop(rax);
- EmitProfilingCounterReset();
- __ bind(&ok);
- }
-#ifdef DEBUG
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-#endif
- CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ movq(rsp, rbp);
- __ pop(rbp);
-
- int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
- __ Ret(arguments_bytes, rcx);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint. We
- // have just generated at least 7 bytes: "movq rsp, rbp; pop rbp; ret k"
- // (3 + 1 + 3).
- const int kPadding = Assembler::kJSReturnSequenceLength - 7;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
- }
- // Check that the size of the code used for returning is large enough
- // for the debugger's requirements.
- ASSERT(Assembler::kJSReturnSequenceLength <=
- masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- codegen()->GetVar(result_register(), var);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- MemOperand operand = codegen()->VarOperand(var, result_register());
- __ push(operand);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Variable* var) const {
- codegen()->GetVar(result_register(), var);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Heap::RootListIndex index) const {
- __ LoadRoot(result_register(), index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Heap::RootListIndex index) const {
- __ PushRoot(index);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (index == Heap::kUndefinedValueRootIndex ||
- index == Heap::kNullValueRootIndex ||
- index == Heap::kFalseValueRootIndex) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (index == Heap::kTrueValueRootIndex) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else {
- __ LoadRoot(result_register(), index);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Handle<Object> lit) const {
- if (lit->IsSmi()) {
- __ SafeMove(result_register(), Smi::cast(*lit));
- } else {
- __ Move(result_register(), lit);
- }
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
- if (lit->IsSmi()) {
- __ SafePush(Smi::cast(*lit));
- } else {
- __ Push(lit);
- }
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals.
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else if (lit->IsString()) {
- if (String::cast(*lit)->length() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else if (lit->IsSmi()) {
- if (Smi::cast(*lit)->value() == 0) {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- } else {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- }
- } else {
- // For simplicity we always test the accumulator register.
- __ Move(result_register(), lit);
- codegen()->DoTest(this);
- }
-}
-
-
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- ASSERT(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
-void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- if (count > 1) __ Drop(count - 1);
- __ movq(Operand(rsp, 0), reg);
-}
-
-
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- ASSERT(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == materialize_false);
- __ bind(materialize_true);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ Move(result_register(), isolate()->factory()->true_value());
- __ jmp(&done, Label::kNear);
- __ bind(materialize_false);
- __ Move(result_register(), isolate()->factory()->false_value());
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
- Label done;
- __ bind(materialize_true);
- __ Push(isolate()->factory()->true_value());
- __ jmp(&done, Label::kNear);
- __ bind(materialize_false);
- __ Push(isolate()->factory()->false_value());
- __ bind(&done);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
- Label* materialize_false) const {
- ASSERT(materialize_true == true_label_);
- ASSERT(materialize_false == false_label_);
-}
-
-
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ LoadRoot(result_register(), value_root_index);
-}
-
-
-void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
- Heap::RootListIndex value_root_index =
- flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
- __ PushRoot(value_root_index);
-}
-
-
-void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
- false_label_);
- if (flag) {
- if (true_label_ != fall_through_) __ jmp(true_label_);
- } else {
- if (false_label_ != fall_through_) __ jmp(false_label_);
- }
-}
-
-
-void FullCodeGenerator::DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- ToBooleanStub stub(result_register());
- __ push(result_register());
- __ CallStub(&stub, condition->test_id());
- __ testq(result_register(), result_register());
- // The stub returns nonzero for true.
- Split(not_zero, if_true, if_false, fall_through);
-}
-
-
-void FullCodeGenerator::Split(Condition cc,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (if_false == fall_through) {
- __ j(cc, if_true);
- } else if (if_true == fall_through) {
- __ j(NegateCondition(cc), if_false);
- } else {
- __ j(cc, if_true);
- __ jmp(if_false);
- }
-}
-
-
-MemOperand FullCodeGenerator::StackOperand(Variable* var) {
- ASSERT(var->IsStackAllocated());
- // Offset is negative because higher indexes are at lower addresses.
- int offset = -var->index() * kPointerSize;
- // Adjust by a (parameter or local) base offset.
- if (var->IsParameter()) {
- offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
- } else {
- offset += JavaScriptFrameConstants::kLocal0Offset;
- }
- return Operand(rbp, offset);
-}
-
-
-MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- if (var->IsContextSlot()) {
- int context_chain_length = scope()->ContextChainLength(var->scope());
- __ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
- } else {
- return StackOperand(var);
- }
-}
-
-
-void FullCodeGenerator::GetVar(Register dest, Variable* var) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- MemOperand location = VarOperand(var, dest);
- __ movq(dest, location);
-}
-
-
-void FullCodeGenerator::SetVar(Variable* var,
- Register src,
- Register scratch0,
- Register scratch1) {
- ASSERT(var->IsContextSlot() || var->IsStackAllocated());
- ASSERT(!scratch0.is(src));
- ASSERT(!scratch0.is(scratch1));
- ASSERT(!scratch1.is(src));
- MemOperand location = VarOperand(var, scratch0);
- __ movq(location, src);
-
- // Emit the write barrier code if the location is in the heap.
- if (var->IsContextSlot()) {
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
- }
-}
-
-
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
- bool should_normalize,
- Label* if_true,
- Label* if_false) {
- // Only prepare for bailouts before splits if we're in a test
- // context. Otherwise, we let the Visit function deal with the
- // preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
-
- Label skip;
- if (should_normalize) __ jmp(&skip, Label::kNear);
- PrepareForBailout(expr, TOS_REG);
- if (should_normalize) {
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- Split(equal, if_true, if_false, NULL);
- __ bind(&skip);
- }
-}
-
-
-void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current context.
- ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
- // Check that we're not inside a with or catch context.
- __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
- __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
- __ Check(not_equal, "Declaration in with context.");
- __ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
- __ Check(not_equal, "Declaration in catch context.");
- }
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(
- VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
- VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
- Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- break;
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(StackOperand(variable), kScratchRegister);
- }
- break;
-
- case Variable::CONTEXT:
- if (hole_init) {
- Comment cmnt(masm_, "[ VariableDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
- // No write barrier since the hole value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- }
- break;
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- __ push(rsi);
- __ Push(variable->name());
- // Declaration nodes are always introduced in one of four modes.
- ASSERT(IsDeclaredVariableMode(mode));
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ Push(Smi::FromInt(attr));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- } else {
- __ Push(Smi::FromInt(0)); // Indicates no initial value.
- }
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitFunctionDeclaration(
- FunctionDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
- Handle<SharedFunctionInfo> function =
- Compiler::BuildFunctionInfo(declaration->fun(), script());
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals_->Add(function, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
- zone());
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- VisitForAccumulatorValue(declaration->fun());
- __ movq(StackOperand(variable), result_register());
- break;
- }
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- VisitForAccumulatorValue(declaration->fun());
- __ movq(ContextOperand(rsi, variable->index()), result_register());
- int offset = Context::SlotOffset(variable->index());
- // We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(rsi,
- offset,
- result_register(),
- rcx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
- break;
- }
-
- case Variable::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(rsi);
- __ Push(variable->name());
- __ Push(Smi::FromInt(NONE));
- VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
-
- // Load instance object.
- __ LoadContext(rax, scope_->ContextChainLength(scope_->GlobalScope()));
- __ movq(rax, ContextOperand(rax, variable->interface()->Index()));
- __ movq(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
-
- // Assign it.
- __ movq(ContextOperand(rsi, variable->index()), rax);
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(rsi,
- Context::SlotOffset(variable->index()),
- rax,
- rcx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
-
- // Traverse into body.
- Visit(declaration->module());
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case Variable::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- // Call the runtime to declare the globals.
- __ push(rsi); // The context is the first argument.
- __ Push(pairs);
- __ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
- Comment cmnt(masm_, "[ SwitchStatement");
- Breakable nested_statement(this, stmt);
- SetStatementPosition(stmt);
-
- // Keep the switch value on the stack until a case matches.
- VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
- CaseClause* default_clause = NULL; // Can occur anywhere in the list.
-
- Label next_test; // Recycled for each test.
- // Compile all the tests with branches to their bodies.
- for (int i = 0; i < clauses->length(); i++) {
- CaseClause* clause = clauses->at(i);
- clause->body_target()->Unuse();
-
- // The default is not a test, but remember it as final fall through.
- if (clause->is_default()) {
- default_clause = clause;
- continue;
- }
-
- Comment cmnt(masm_, "[ Case comparison");
- __ bind(&next_test);
- next_test.Unuse();
-
- // Compile the label expression.
- VisitForAccumulatorValue(clause->label());
-
- // Perform the comparison as if via '==='.
- __ movq(rdx, Operand(rsp, 0)); // Switch value.
- bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ movq(rcx, rdx);
- __ or_(rcx, rax);
- patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
-
- __ cmpq(rdx, rax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target());
- __ bind(&slow_case);
- }
-
- // Record position before stub call for type feedback.
- SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
- patch_site.EmitPatchInfo();
-
- __ testq(rax, rax);
- __ j(not_equal, &next_test);
- __ Drop(1); // Switch value is no longer needed.
- __ jmp(clause->body_target());
- }
-
- // Discard the test value and jump to the default if present, otherwise to
- // the end of the statement.
- __ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
- if (default_clause == NULL) {
- __ jmp(nested_statement.break_label());
- } else {
- __ jmp(default_clause->body_target());
- }
-
- // Compile all the case bodies.
- for (int i = 0; i < clauses->length(); i++) {
- Comment cmnt(masm_, "[ Case body");
- CaseClause* clause = clauses->at(i);
- __ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
- VisitStatements(clause->statements());
- }
-
- __ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- Comment cmnt(masm_, "[ ForInStatement");
- SetStatementPosition(stmt);
-
- Label loop, exit;
- ForIn loop_statement(this, stmt);
- increment_loop_depth();
-
- // Get the object to enumerate over. Both SpiderMonkey and JSC
- // ignore null and undefined in contrast to the specification; see
- // ECMA-262 section 12.6.4.
- VisitForAccumulatorValue(stmt->enumerable());
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, &exit);
- Register null_value = rdi;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
- __ j(equal, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
- // Convert the object to a JS object.
- Label convert, done_convert;
- __ JumpIfSmi(rax, &convert);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &done_convert);
- __ bind(&convert);
- __ push(rax);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ bind(&done_convert);
- __ push(rax);
-
- // Check for proxies.
- Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
- __ j(below_equal, &call_runtime);
-
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- __ CheckEnumCache(null_value, &call_runtime);
-
- // The enum cache is valid. Load the map of the object being
- // iterated over and use the cache for the iteration.
- Label use_cache;
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- __ jmp(&use_cache, Label::kNear);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(rax); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
- // If we got a map from the runtime call, we can do a fast
- // modification check. Otherwise, we got a fixed array, and we have
- // to do a slow check.
- Label fixed_array;
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kMetaMapRootIndex);
- __ j(not_equal, &fixed_array);
-
- // We got a map in register rax. Get the enumeration cache from it.
- __ bind(&use_cache);
-
- Label no_descriptors;
-
- __ EnumLength(rdx, rax);
- __ Cmp(rdx, Smi::FromInt(0));
- __ j(equal, &no_descriptors);
-
- __ LoadInstanceDescriptors(rax, rcx);
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
- // Set up the four remaining stack slots.
- __ push(rax); // Map.
- __ push(rcx); // Enumeration cache.
- __ push(rdx); // Number of valid entries for the map in the enum cache.
- __ Push(Smi::FromInt(0)); // Initial index.
- __ jmp(&loop);
-
- __ bind(&no_descriptors);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&exit);
-
- // We got a fixed array in register rax. Iterate through that.
- Label non_proxy;
- __ bind(&fixed_array);
-
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ LoadHeapObject(rbx, cell);
- __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
-
- __ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
- __ movq(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
- __ j(above, &non_proxy);
- __ Move(rbx, Smi::FromInt(0)); // Zero indicates proxy
- __ bind(&non_proxy);
- __ push(rbx); // Smi
- __ push(rax); // Array
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ push(rax); // Fixed array length (as smi).
- __ Push(Smi::FromInt(0)); // Initial index.
-
- // Generate code for doing the condition check.
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
- __ bind(&loop);
- __ movq(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
- __ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
- __ j(above_equal, loop_statement.break_label());
-
- // Get the current entry of the array into register rbx.
- __ movq(rbx, Operand(rsp, 2 * kPointerSize));
- SmiIndex index = masm()->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rbx,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
-
-  // Get the expected map (or a smi, in the permanent slow case) from the
-  // stack into register rdx.
- __ movq(rdx, Operand(rsp, 3 * kPointerSize));
-
- // Check if the expected map still matches that of the enumerable.
- // If not, we may have to filter the key.
- Label update_each;
- __ movq(rcx, Operand(rsp, 4 * kPointerSize));
- __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ j(equal, &update_each, Label::kNear);
-
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- __ Cmp(rdx, Smi::FromInt(0));
- __ j(equal, &update_each, Label::kNear);
-
- // Convert the entry to a string or null if it isn't a property
- // anymore. If the property has been removed while iterating, we
- // just skip it.
- __ push(rcx); // Enumerable.
- __ push(rbx); // Current entry.
- __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ Cmp(rax, Smi::FromInt(0));
- __ j(equal, loop_statement.continue_label());
- __ movq(rbx, rax);
-
- // Update the 'each' property or variable from the possibly filtered
- // entry in register rbx.
- __ bind(&update_each);
- __ movq(result_register(), rbx);
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitAssignment(stmt->each());
- }
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Generate code for going to the next element by incrementing the
- // index (smi) stored on top of the stack.
- __ bind(loop_statement.continue_label());
- __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
-
- EmitBackEdgeBookkeeping(stmt, &loop);
- __ jmp(&loop);
-
- // Remove the pointers stored on the stack.
- __ bind(loop_statement.break_label());
- __ addq(rsp, Immediate(5 * kPointerSize));
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
- __ bind(&exit);
- decrement_loop_depth();
-}
-
-
-void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
- bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
- // flag, we need to use the runtime function so that the new function
- // we are creating here gets a chance to have its code optimized and
- // doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
- __ Push(info);
- __ CallStub(&stub);
- } else {
- __ push(rsi);
- __ Push(info);
- __ Push(pretenure
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value());
- __ CallRuntime(Runtime::kNewClosure, 3);
- }
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
- Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr);
-}
-
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
- TypeofState typeof_state,
- Label* slow) {
- Register context = rsi;
- Register temp = rdx;
-
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
- }
- // Load next context in chain.
- __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering rsi.
- context = temp;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s != NULL && s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(temp)) {
- __ movq(temp, context);
- }
- // Load map for comparison into register, outside loop.
- __ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
- __ bind(&next);
- // Terminate at native context.
- __ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
- __ j(equal, &fast, Label::kNear);
- // Check that extension is NULL.
- __ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
- // Load next context in chain.
- __ movq(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
- __ jmp(&next);
- __ bind(&fast);
- }
-
- // All extension objects were empty and it is safe to use a global
- // load IC call.
- __ movq(rax, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ Move(rcx, var->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
- ? RelocInfo::CODE_TARGET
- : RelocInfo::CODE_TARGET_CONTEXT;
- CallIC(ic, mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- ASSERT(var->IsContextSlot());
- Register context = rsi;
- Register temp = rbx;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
- // Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
- }
- __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering rsi.
- context = temp;
- }
- }
- // Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an rsi-based operand (the write barrier cannot be allowed to
- // destroy the rsi register).
- return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
- TypeofState typeof_state,
- Label* slow,
- Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
- __ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, done);
- if (local->mode() == CONST) {
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST_HARMONY
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- }
- }
- __ jmp(done);
- }
-}
-
-
-void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
- // Record position before possible IC call.
- SetSourcePosition(proxy->position());
- Variable* var = proxy->var();
-
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
- switch (var->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
- // Use inline caching. Variable name is passed in rcx and the global
- // object on the stack.
- __ Move(rcx, var->name());
- __ movq(rax, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- context()->Plug(rax);
- break;
- }
-
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
-        // always looked up dynamically, i.e. in that case
-        // var->location() == LOOKUP always holds.
- ASSERT(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else {
- // Check that we always have valid source position.
- ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
- ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- Label done;
- GetVar(rax, var);
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else {
-            // Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- }
- __ bind(&done);
- context()->Plug(rax);
- break;
- }
- }
- context()->Plug(var);
- break;
- }
-
- case Variable::LOOKUP: {
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
- __ bind(&slow);
- Comment cmnt(masm_, "Lookup slot");
- __ push(rsi); // Context.
- __ Push(var->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ bind(&done);
- context()->Plug(rax);
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // rdi = JS function.
- // rcx = literals array.
- // rbx = regexp literal.
- // rax = regexp literal clone.
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ movq(rbx, FieldOperand(rcx, literal_offset));
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in rax.
- __ push(rcx);
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->pattern());
- __ Push(expr->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ movq(rbx, rax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(rbx);
- __ Push(Smi::FromInt(size));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(rbx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
- }
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
- if (expression == NULL) {
- __ PushRoot(Heap::kNullValueRootIndex);
- } else {
- VisitForStackValue(expression);
- }
-}
-
-
-void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
- int flags = expr->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= expr->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- int properties_count = constant_properties->length() / 2;
- if (expr->depth() > 1) {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
- } else {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Move(rbx, Smi::FromInt(expr->literal_index()));
- __ Move(rcx, constant_properties);
- __ Move(rdx, Smi::FromInt(flags));
- FastCloneShallowObjectStub stub(properties_count);
- __ CallStub(&stub);
- }
-
- // If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in rax.
- bool result_saved = false;
-
- // Mark all computed expressions that are bound to a key that
- // is shadowed by a later occurrence of the same key. For the
- // marked expressions, no store code is emitted.
- expr->CalculateEmitStore(zone());
-
- AccessorTable accessor_table(zone());
- for (int i = 0; i < expr->properties()->length(); i++) {
- ObjectLiteral::Property* property = expr->properties()->at(i);
- if (property->IsCompileTimeValue()) continue;
-
- Literal* key = property->key();
- Expression* value = property->value();
- if (!result_saved) {
- __ push(rax); // Save result on the stack
- result_saved = true;
- }
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
- // Fall through.
- case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsInternalizedString()) {
- if (property->emit_store()) {
- VisitForAccumulatorValue(value);
- __ Move(rcx, key->handle());
- __ movq(rdx, Operand(rsp, 0));
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, key->LiteralFeedbackId());
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
- } else {
- VisitForEffect(value);
- }
- break;
- }
- // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(rsp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- VisitForStackValue(value);
- if (property->emit_store()) {
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ CallRuntime(Runtime::kSetProperty, 4);
- } else {
- __ Drop(3);
- }
- break;
- case ObjectLiteral::Property::GETTER:
- accessor_table.lookup(key)->second->getter = value;
- break;
- case ObjectLiteral::Property::SETTER:
- accessor_table.lookup(key)->second->setter = value;
- break;
- }
- }
-
- // Emit code to define accessors, using only a single call to the runtime for
- // each pair of corresponding getters and setters.
- for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end();
- ++it) {
- __ push(Operand(rsp, 0)); // Duplicate receiver.
- VisitForStackValue(it->first);
- EmitAccessor(it->second->getter);
- EmitAccessor(it->second->setter);
- __ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
- }
-
- if (expr->has_function()) {
- ASSERT(result_saved);
- __ push(Operand(rsp, 0));
- __ CallRuntime(Runtime::kToFastProperties, 1);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(rax);
- }
-}
-
-
-void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
- Comment cmnt(masm_, "[ ArrayLiteral");
-
- ZoneList<Expression*>* subexprs = expr->values();
- int length = subexprs->length();
- Handle<FixedArray> constant_elements = expr->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
- bool has_constant_fast_elements =
- IsFastObjectElementsKind(constant_elements_kind);
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(constant_elements->get(1)));
-
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_elements);
- Heap* heap = isolate()->heap();
- if (has_constant_fast_elements &&
- constant_elements_values->map() == heap->fixed_cow_array_map()) {
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
- FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
- length);
- __ CallStub(&stub);
- } else if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
- FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
-
- // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
- // change, so it's possible to specialize the stub in advance.
- if (has_constant_fast_elements) {
- mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
- }
-
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- __ CallStub(&stub);
- }
-
- bool result_saved = false; // Is the result saved to the stack?
-
- // Emit code to evaluate all the non-constant subexpressions and to store
- // them into the newly cloned array.
- for (int i = 0; i < length; i++) {
- Expression* subexpr = subexprs->at(i);
- // If the subexpression is a literal or a simple materialized literal it
- // is already set in the cloned array.
- if (subexpr->AsLiteral() != NULL ||
- CompileTimeValue::IsCompileTimeValue(subexpr)) {
- continue;
- }
-
- if (!result_saved) {
- __ push(rax);
- result_saved = true;
- }
- VisitForAccumulatorValue(subexpr);
-
- if (IsFastObjectElementsKind(constant_elements_kind)) {
-      // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS: such
-      // literals cannot transition, so no runtime stub call is needed.
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
- __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- // Store the subexpression value in the array's elements.
- __ movq(FieldOperand(rbx, offset), result_register());
- // Update the write barrier for the array store.
- __ RecordWriteField(rbx, offset, result_register(), rcx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- } else {
- // Store the subexpression value in the array's elements.
- __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
- __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
- __ Move(rcx, Smi::FromInt(i));
- __ Move(rdx, Smi::FromInt(expr->literal_index()));
- StoreArrayLiteralElementStub stub;
- __ CallStub(&stub);
- }
-
- PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
- }
-
- if (result_saved) {
- context()->PlugTOS();
- } else {
- context()->Plug(rax);
- }
-}
-
-
-void FullCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* property = expr->target()->AsProperty();
- if (property != NULL) {
- assign_type = (property->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- // Evaluate LHS expression.
- switch (assign_type) {
- case VARIABLE:
- // Nothing to do here.
- break;
- case NAMED_PROPERTY:
- if (expr->is_compound()) {
- // We need the receiver both on the stack and in the accumulator.
- VisitForAccumulatorValue(property->obj());
- __ push(result_register());
- } else {
- VisitForStackValue(property->obj());
- }
- break;
- case KEYED_PROPERTY: {
- if (expr->is_compound()) {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- __ movq(rdx, Operand(rsp, 0));
- __ push(rax);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
- break;
- }
- }
-
- // For compound assignments we need another deoptimization point after the
- // variable/property load.
- if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
- switch (assign_type) {
- case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
- break;
- }
- }
-
- Token::Value op = expr->binary_op();
- __ push(rax); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
-
- OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
- SetSourcePosition(expr->position() + 1);
- AccumulatorValueContext context(this);
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- mode,
- expr->target(),
- expr->value());
- } else {
- EmitBinaryOp(expr->binary_operation(), op, mode);
- }
- // Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
- } else {
- VisitForAccumulatorValue(expr->value());
- }
-
- // Record source position before possible IC call.
- SetSourcePosition(expr->position());
-
- // Store the value.
- switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
- break;
- case NAMED_PROPERTY:
- EmitNamedPropertyAssignment(expr);
- break;
- case KEYED_PROPERTY:
- EmitKeyedPropertyAssignment(expr);
- break;
- }
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Literal* key = prop->key()->AsLiteral();
- __ Move(rcx, key->handle());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetSourcePosition(prop->position());
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, RelocInfo::CODE_TARGET, prop->PropertyFeedbackId());
-}
-
-
-void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode,
- Expression* left,
- Expression* right) {
- // Do combined smi check of the operands. Left operand is on the
- // stack (popped into rdx). Right operand is in rax but moved into
- // rcx to make the shifts easier.
- Label done, stub_call, smi_case;
- __ pop(rdx);
- __ movq(rcx, rax);
- __ or_(rax, rdx);
- JumpPatchSite patch_site(masm_);
- patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
-
- __ bind(&stub_call);
- __ movq(rax, rcx);
- BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- __ jmp(&done, Label::kNear);
-
- __ bind(&smi_case);
- switch (op) {
- case Token::SAR:
- __ SmiShiftArithmeticRight(rax, rdx, rcx);
- break;
- case Token::SHL:
- __ SmiShiftLeft(rax, rdx, rcx);
- break;
- case Token::SHR:
- __ SmiShiftLogicalRight(rax, rdx, rcx, &stub_call);
- break;
- case Token::ADD:
- __ SmiAdd(rax, rdx, rcx, &stub_call);
- break;
- case Token::SUB:
- __ SmiSub(rax, rdx, rcx, &stub_call);
- break;
- case Token::MUL:
- __ SmiMul(rax, rdx, rcx, &stub_call);
- break;
- case Token::BIT_OR:
- __ SmiOr(rax, rdx, rcx);
- break;
- case Token::BIT_AND:
- __ SmiAnd(rax, rdx, rcx);
- break;
- case Token::BIT_XOR:
- __ SmiXor(rax, rdx, rcx);
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
- OverwriteMode mode) {
- __ pop(rdx);
- BinaryOpStub stub(op, mode);
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->BinaryOperationFeedbackId());
- patch_site.EmitPatchInfo();
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
-
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
-
- switch (assign_type) {
- case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
- EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN);
- break;
- }
- case NAMED_PROPERTY: {
- __ push(rax); // Preserve value.
- VisitForAccumulatorValue(prop->obj());
- __ movq(rdx, rax);
- __ pop(rax); // Restore value.
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- case KEYED_PROPERTY: {
- __ push(rax); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
- __ pop(rdx);
- __ pop(rax); // Restore value.
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic);
- break;
- }
- }
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
- if (var->IsUnallocated()) {
- // Global var, const, or let.
- __ Move(rcx, var->name());
- __ movq(rdx, var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else if (op == Token::INIT_CONST) {
- // Const initializers need a write barrier.
- ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- Label skip;
- __ movq(rdx, StackOperand(var));
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &skip);
- __ movq(StackOperand(var), rax);
- __ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(rax);
- __ push(rsi);
- __ Push(var->name());
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- }
-
- } else if (var->mode() == LET && op != Token::INIT_LET) {
- // Non-initializing assignment to let variable needs a write barrier.
- if (var->IsLookupSlot()) {
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- } else {
- ASSERT(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, rcx);
- __ movq(rdx, location);
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &assign, Label::kNear);
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&assign);
- __ movq(location, rax);
- if (var->IsContextSlot()) {
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
- }
-
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
- // Assignment to var or initializing assignment to let/const
- // in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
- MemOperand location = VarOperand(var, rcx);
- if (generate_debug_code_ && op == Token::INIT_LET) {
- // Check for an uninitialized let binding.
- __ movq(rdx, location);
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ Check(equal, "Let binding re-initialization.");
- }
- // Perform the assignment.
- __ movq(location, rax);
- if (var->IsContextSlot()) {
- __ movq(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
- }
- }
- // Non-initializing assignments to consts are ignored.
-}
-
-
-void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a named store IC.
- Property* prop = expr->target()->AsProperty();
- ASSERT(prop != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
-
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
- // Assignment to a property, using a keyed store IC.
-
- __ pop(rcx);
- __ pop(rdx);
- // Record source code position before IC call.
- SetSourcePosition(expr->position());
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
-
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- VisitForAccumulatorValue(expr->obj());
- EmitNamedPropertyLoad(expr);
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(rax);
- } else {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ pop(rdx);
- EmitKeyedPropertyLoad(expr);
- context()->Plug(rax);
- }
-}
-
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- __ call(code, rmode, ast_id);
-}
-
-
-void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> name,
- RelocInfo::Mode mode) {
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- __ Move(rcx, name);
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
- Expression* key) {
- // Load the key.
- VisitForAccumulatorValue(key);
-
- // Swap the name of the function and the receiver on the stack to follow
- // the calling convention for call ICs.
- __ pop(rcx);
- __ push(rax);
- __ push(rcx);
-
- // Load the arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
- __ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax); // Drop the key still on the stack.
-}
-
-
-void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
- // Code common for calls using the call stub.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
-
- // Record call targets in unoptimized code.
- flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ Move(rbx, cell);
-
- CallFunctionStub stub(arg_count, flags);
- __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- context()->DropAndPlug(1, rax);
-}
-
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ push(Operand(rsp, arg_count * kPointerSize));
- } else {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- }
-
- // Push the receiver of the enclosing function and do runtime call.
- __ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
-
- // Push the language mode.
- __ Push(Smi::FromInt(language_mode()));
-
-  // Push the start position of the scope the call resides in.
- __ Push(Smi::FromInt(scope()->start_position()));
-
- // Push the qml mode flag
- __ Push(Smi::FromInt(is_qml_mode()));
-
- // Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
-}
-
-
-void FullCodeGenerator::VisitCall(Call* expr) {
-#ifdef DEBUG
- // We want to verify that RecordJSReturnSite gets called on all paths
- // through this function. Avoid early returns.
- expr->return_is_recorded_ = false;
-#endif
-
- Comment cmnt(masm_, "[ Call");
- Expression* callee = expr->expression();
- VariableProxy* proxy = callee->AsVariableProxy();
- Property* property = callee->AsProperty();
-
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the call.
- // Then we call the resolved function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- { PreservePositionScope pos_scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- __ PushRoot(Heap::kUndefinedValueRootIndex); // Reserved receiver slot.
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and resolve
- // eval.
- __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
-
- // The runtime call returns a pair of values in rax (function) and
- // rdx (receiver). Touch up the stack with the right values.
- __ movq(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
- __ movq(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
- }
- // Record source position for debugger.
- SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
- __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
- RecordJSReturnSite(expr);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, rax);
- } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
- // Call to a global variable. Push global object as receiver for the
- // call IC lookup.
- __ push(proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- // Call to a lookup slot (dynamically introduced variable).
- Label slow, done;
-
- { PreservePositionScope scope(masm()->positions_recorder());
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
- }
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in rax) and
- // the object holding it (returned in rdx).
- __ push(context_register());
- __ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(rax); // Function.
- __ push(rdx); // Receiver.
-
- // If fast case code has been generated, emit code to push the function
- // and receiver and have the slow path jump around this code.
- if (done.is_linked()) {
- Label call;
- __ jmp(&call, Label::kNear);
- __ bind(&done);
- // Push function.
- __ push(rax);
- // The receiver is implicitly the global receiver. Indicate this by
- // passing the hole to the call function stub.
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- __ bind(&call);
- }
-
- // The receiver is either the global receiver or an object found by
- // LoadContextSlot. That object could be the hole if the receiver is
- // implicitly the global object.
- EmitCallWithStub(expr, RECEIVER_MIGHT_BE_IMPLICIT);
- } else if (property != NULL) {
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(property->obj());
- }
- if (property->key()->IsPropertyName()) {
- EmitCallWithIC(expr,
- property->key()->AsLiteral()->handle(),
- RelocInfo::CODE_TARGET);
- } else {
- EmitKeyedCallWithIC(expr, property->key());
- }
- } else {
- // Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
- VisitForStackValue(callee);
- }
- // Load global receiver object.
- __ movq(rbx, GlobalObjectOperand());
- __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
- // Emit function call.
- EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
- }
-
-#ifdef DEBUG
- // RecordJSReturnSite should have been called.
- ASSERT(expr->return_is_recorded_);
-#endif
-}
-
-
-void FullCodeGenerator::VisitCallNew(CallNew* expr) {
- Comment cmnt(masm_, "[ CallNew");
- // According to ECMA-262, section 11.2.2, page 44, the function
- // expression in new calls must be evaluated before the
- // arguments.
-
- // Push constructor on the stack. If it's not a function it's used as
- // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
- VisitForStackValue(expr->expression());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetSourcePosition(expr->position());
-
- // Load function and argument count into rdi and rax.
- __ Set(rax, arg_count);
- __ movq(rdi, Operand(rsp, arg_count * kPointerSize));
-
- // Record call targets in unoptimized code, but not in the snapshot.
- Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ Move(rbx, cell);
-
- CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ JumpIfSmi(rax, if_true);
- __ jmp(if_false);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Condition non_negative_smi = masm()->CheckNonNegativeSmi(rax);
- Split(non_negative_smi, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- __ j(equal, if_true);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- __ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(below, if_false);
- __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(below_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(above_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(not_zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(rax);
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(not_zero, if_true);
-
- // Check for fast case object. Generate false result for slow case object.
- __ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
- __ j(equal, if_false);
-
- // Look for valueOf string in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(rcx, rbx);
- __ cmpq(rcx, Immediate(0));
- __ j(equal, &done);
-
- __ LoadInstanceDescriptors(rbx, rbx);
- // rbx: descriptor array.
- // rcx: valid entries in the descriptor array.
- // Calculate the end of the descriptor array.
- __ imul(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
- SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
- __ lea(rcx,
- Operand(
- rbx, index.reg, index.scale, DescriptorArray::kFirstOffset));
- // Calculate location of the first key name.
- __ addq(rbx, Immediate(DescriptorArray::kFirstOffset));
- // Loop through all the keys in the descriptor array. If one of these is the
- // internalized string "valueOf" the result is false.
- __ jmp(&entry);
- __ bind(&loop);
- __ movq(rdx, FieldOperand(rbx, 0));
- __ Cmp(rdx, FACTORY->value_of_string());
- __ j(equal, if_false);
- __ addq(rbx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ cmpq(rbx, rcx);
- __ j(not_equal, &loop);
-
- __ bind(&done);
- // Reload map as register rbx was used as temporary above.
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
-
- // If a valueOf property is not found on the object, check that its
- // prototype is the unmodified String prototype. If not, the result is false.
- __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ testq(rcx, Immediate(kSmiTagMask));
- __ j(zero, if_false);
- __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ cmpq(rcx,
- ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ j(not_equal, if_false);
- // Set the bit in the map to indicate that it has been checked safe for
- // default valueOf and set true result.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ jmp(if_true);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- context()->Plug(if_true, if_false);
-}
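(For reference, the descriptor scan generated above amounts to the following standalone C++ sketch; the Descriptor struct and function name are illustrative stand-ins, not V8 types.)

  #include <cstring>

  struct Descriptor { const char* key; };  // Illustrative stand-in for one descriptor entry.

  // Returns true if any own descriptor key is "valueOf", i.e. the map is NOT
  // safe for the default valueOf fast case.
  bool HasOwnValueOf(const Descriptor* descriptors, int number_of_own_descriptors) {
    for (int i = 0; i < number_of_own_descriptors; ++i) {
      if (std::strcmp(descriptors[i].key, "valueOf") == 0) return true;
    }
    return false;
  }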
-
-
-void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, SYMBOL_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movq(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(rbx);
- __ cmpq(rax, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in rdx and the formal
- // parameter count in rax.
- VisitForAccumulatorValue(args->at(0));
- __ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
- ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label exit;
- // Get the number of formal parameters.
- __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &exit, Label::kNear);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- __ AssertSmi(rax);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- Label done, null, function, non_function_constructor;
-
- VisitForAccumulatorValue(args->at(0));
-
- // If the object is a smi, we return null.
- __ JumpIfSmi(rax, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
- // Map is now in rax.
- __ j(below, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ j(equal, &function);
-
- __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ j(equal, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
- // Check if the constructor in the map is a JS function.
- __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &non_function_constructor);
-
- // rax now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done);
-
- // Functions have class 'Function'.
- __ bind(&function);
- __ Move(rax, isolate()->factory()->function_class_string());
- __ jmp(&done);
-
- // Objects with a non-function constructor have class 'Object'.
- __ bind(&non_function_constructor);
- __ Move(rax, isolate()->factory()->Object_string());
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ LoadRoot(rax, Heap::kNullValueRootIndex);
-
- // All done.
- __ bind(&done);
-
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
- // Conditionally generate a log call.
- // Args:
- // 0 (literal string): The type of logging (corresponds to the flags).
- // This is used to determine whether or not to generate the log call.
- // 1 (string): Format string. Access the string at argument index 2
- // with '%2s' (see Logger::LogRuntime for all the formats).
- // 2 (array): Arguments to the format string.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 3);
- if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
- }
- // Finally, we're expected to leave a value on the top of the stack.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
- ASSERT(expr->arguments()->length() == 0);
-
- Label slow_allocate_heapnumber;
- Label heapnumber_allocated;
-
- __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
- __ jmp(&heapnumber_allocated);
-
- __ bind(&slow_allocate_heapnumber);
- // Allocate a heap number.
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ movq(rbx, rax);
-
- __ bind(&heapnumber_allocated);
-
- // Return a random uint32 number in rax.
- // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
- __ PrepareCallCFunction(1);
-#ifdef _WIN64
- __ movq(rcx,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
-
-#else
- __ movq(rdi,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
-#endif
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // ( 1.(20 0s)(32 random bits) x 2^20 ) - ( 1.0 x 2^20 ).
- __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, rcx);
- __ movd(xmm0, rax);
- __ cvtss2sd(xmm1, xmm1);
- __ xorps(xmm0, xmm1);
- __ subsd(xmm0, xmm1);
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-
- __ movq(rax, rbx);
- context()->Plug(rax);
-}
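(A minimal standalone sketch of the same bit trick in portable C++, assuming IEEE-754 doubles; the function name is ours, not V8's.)

  #include <cstdint>
  #include <cstring>

  // 0x4130000000000000 is the bit pattern of 1.0 * 2^20 as a double. OR-ing
  // (equivalently XOR-ing, since the fields do not overlap) 32 random bits into
  // the low mantissa bits yields 1.(20 zeros)(32 random bits) * 2^20, and
  // subtracting 1.0 * 2^20 leaves random32 / 2^32, a value in [0, 1).
  double RandomBitsToDouble(uint32_t random32) {
    uint64_t bits = 0x4130000000000000ULL | random32;
    double d;
    std::memcpy(&d, &bits, sizeof d);
    return d - 1048576.0;  // 1048576.0 == 1.0 * 2^20.
  }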
-
-
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- SubStringStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
- // Load the arguments on the stack and call the stub.
- RegExpExecStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 4);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- VisitForStackValue(args->at(3));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(rax, &done);
- // If the object is not a value type, return the object.
- __ CmpObjectType(rax, JS_VALUE_TYPE, rbx);
- __ j(not_equal, &done);
- __ movq(rax, FieldOperand(rax, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- ASSERT_NE(NULL, args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label runtime, done, not_date_object;
- Register object = rax;
- Register result = rax;
- Register scratch = rcx;
-
- __ JumpIfSmi(object, &not_date_object);
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ j(not_equal, &not_date_object);
-
- if (index->value() == 0) {
- __ movq(result, FieldOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ movq(scratch, stamp);
- __ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ movq(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2);
-#ifdef _WIN64
- __ movq(rcx, object);
- __ movq(rdx, index, RelocInfo::NONE64);
-#else
- __ movq(rdi, object);
- __ movq(rsi, index, RelocInfo::NONE64);
-#endif
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
- }
-
- __ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(rcx);
- __ pop(rbx);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(rcx);
- __ pop(rbx);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(rbx); // rax = value. rbx = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(rbx, &done);
-
- // If the object is not a value type, return the value.
- __ CmpObjectType(rbx, JS_VALUE_TYPE, rcx);
- __ j(not_equal, &done);
-
- // Store the value.
- __ movq(FieldOperand(rbx, JSValue::kValueOffset), rax);
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ movq(rdx, rax);
- __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(args->length(), 1);
-
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
-
- NumberToStringStub stub;
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label done;
- StringCharFromCodeGenerator generator(rax, rbx);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(rbx);
-}
-
-
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = rbx;
- Register index = rax;
- Register result = rdx;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // NaN.
- __ LoadRoot(result, Heap::kNanValueRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move the undefined value into the result register, which will
- // trigger conversion.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = rbx;
- Register index = rax;
- Register scratch = rdx;
- Register result = rax;
-
- __ pop(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result, Smi::FromInt(0));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub;
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
- // Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::TAGGED);
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
- // Load the argument on the stack and call the runtime function.
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForStackValue(args->at(0));
- __ CallRuntime(Runtime::kMath_sqrt, 1);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() >= 2);
-
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
- }
- VisitForAccumulatorValue(args->last()); // Function.
-
- Label runtime, done;
- // Check for non-function argument (including proxy).
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &runtime);
-
- // InvokeFunction requires the function in rdi. Move it in there.
- __ movq(rdi, result_register());
- ParameterCount count(arg_count);
- __ InvokeFunction(rdi, count, CALL_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
-
- __ bind(&runtime);
- __ push(rax);
- __ CallRuntime(Runtime::kCall, args->length());
- __ bind(&done);
-
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- RegExpConstructResultStub stub;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 3);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- VisitForStackValue(args->at(2));
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- ASSERT_NE(NULL, args->at(0)->AsLiteral());
- int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
- Handle<FixedArray> jsfunction_result_caches(
- isolate()->native_context()->jsfunction_result_caches());
- if (jsfunction_result_caches->length() <= cache_id) {
- __ Abort("Attempt to use undefined cache.");
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- context()->Plug(rax);
- return;
- }
-
- VisitForAccumulatorValue(args->at(1));
-
- Register key = rax;
- Register cache = rbx;
- Register tmp = rcx;
- __ movq(cache, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(cache,
- FieldOperand(cache, GlobalObject::kNativeContextOffset));
- __ movq(cache,
- ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ movq(cache,
- FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
- Label done, not_found;
- // tmp now holds finger offset as a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
- SmiIndex index =
- __ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
- __ cmpq(key, FieldOperand(cache,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ j(not_equal, &not_found, Label::kNear);
- __ movq(rax, FieldOperand(cache,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize + kPointerSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&not_found);
- // Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(2, args->length());
-
- Register right = rax;
- Register left = rbx;
- Register tmp = rcx;
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ pop(left);
-
- Label done, fail, ok;
- __ cmpq(left, right);
- __ j(equal, &ok, Label::kNear);
- // Fail if either is a non-HeapObject.
- Condition either_smi = masm()->CheckEitherSmi(left, right, tmp);
- __ j(either_smi, &fail, Label::kNear);
- __ j(zero, &fail, Label::kNear);
- __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
- __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
- Immediate(JS_REGEXP_TYPE));
- __ j(not_equal, &fail, Label::kNear);
- __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
- __ j(not_equal, &fail, Label::kNear);
- __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
- __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
- __ j(equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Move(rax, isolate()->factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&ok);
- __ Move(rax, isolate()->factory()->true_value());
- __ bind(&done);
-
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ testl(FieldOperand(rax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ j(zero, if_true);
- __ jmp(if_false);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(rax);
-
- __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ IndexFromHash(rax, rax);
-
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- Label bailout, return_result, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT(args->length() == 2);
- // We will leave the separator on the stack until the end of the function.
- VisitForStackValue(args->at(1));
- // Load this to rax (= array)
- VisitForAccumulatorValue(args->at(0));
- // All aliases of the same register have disjoint lifetimes.
- Register array = rax;
- Register elements = no_reg; // Will be rax.
-
- Register index = rdx;
-
- Register string_length = rcx;
-
- Register string = rsi;
-
- Register scratch = rbx;
-
- Register array_length = rdi;
- Register result_pos = no_reg; // Will be rdi.
-
- Operand separator_operand = Operand(rsp, 2 * kPointerSize);
- Operand result_operand = Operand(rsp, 1 * kPointerSize);
- Operand array_length_operand = Operand(rsp, 0 * kPointerSize);
- // Separator operand is already pushed. Make room for the two
- // other stack fields, and clear the direction flag in anticipation
- // of calling CopyBytes.
- __ subq(rsp, Immediate(2 * kPointerSize));
- __ cld();
- // Check that the array is a JSArray
- __ JumpIfSmi(array, &bailout);
- __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch, &bailout);
-
- // Array has fast elements, so its length must be a smi.
- // If the array has length zero, return the empty string.
- __ movq(array_length, FieldOperand(array, JSArray::kLengthOffset));
- __ SmiCompare(array_length, Smi::FromInt(0));
- __ j(not_zero, &non_trivial_array);
- __ LoadRoot(rax, Heap::kempty_stringRootIndex);
- __ jmp(&return_result);
-
- // Save the array length on the stack.
- __ bind(&non_trivial_array);
- __ SmiToInteger32(array_length, array_length);
- __ movl(array_length_operand, array_length);
-
- // Save the FixedArray containing array's elements.
- // End of array's live range.
- elements = array;
- __ movq(elements, FieldOperand(array, JSArray::kElementsOffset));
- array = no_reg;
-
-
- // Check that all array elements are sequential ASCII strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, 0);
- __ Set(string_length, 0);
- // Loop condition: while (index < array_length).
- // Live loop registers: index(int32), array_length(int32), string(String*),
- // scratch, string_length(int32), elements(FixedArray*).
- if (generate_debug_code_) {
- __ cmpq(index, array_length);
- __ Assert(below, "No empty arrays here in EmitFastAsciiArrayJoin");
- }
- __ bind(&loop);
- __ movq(string, FieldOperand(elements,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ andb(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
- __ j(not_equal, &bailout);
- __ AddSmiField(string_length,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
- __ j(overflow, &bailout);
- __ incl(index);
- __ cmpl(index, array_length);
- __ j(less, &loop);
-
- // Live registers:
- // string_length: Sum of string lengths.
- // elements: FixedArray of strings.
- // index: Array length.
- // array_length: Array length.
-
- // If array_length is 1, return elements[0], a string.
- __ cmpl(array_length, Immediate(1));
- __ j(not_equal, &not_size_one_array);
- __ movq(rax, FieldOperand(elements, FixedArray::kHeaderSize));
- __ jmp(&return_result);
-
- __ bind(&not_size_one_array);
-
- // End of array_length live range.
- result_pos = array_length;
- array_length = no_reg;
-
- // Live registers:
- // string_length: Sum of string lengths.
- // elements: FixedArray of strings.
- // index: Array length.
-
- // Check that the separator is a sequential ASCII string.
- __ movq(string, separator_operand);
- __ JumpIfSmi(string, &bailout);
- __ movq(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ andb(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
- __ j(not_equal, &bailout);
-
- // Live registers:
- // string_length: Sum of string lengths.
- // elements: FixedArray of strings.
- // index: Array length.
- // string: Separator string.
-
- // Add (separator length times (array_length - 1)) to string_length.
- __ SmiToInteger32(scratch,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
- __ decl(index);
- __ imull(scratch, index);
- __ j(overflow, &bailout);
- __ addl(string_length, scratch);
- __ j(overflow, &bailout);
-
- // Live registers and stack values:
- // string_length: Total length of result string.
- // elements: FixedArray of strings.
- __ AllocateAsciiString(result_pos, string_length, scratch,
- index, string, &bailout);
- __ movq(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
-
- __ movq(string, separator_operand);
- __ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
- Smi::FromInt(1));
- __ j(equal, &one_char_separator);
- __ j(greater, &long_separator);
-
-
- // Empty separator case:
- __ Set(index, 0);
- __ movl(scratch, array_length_operand);
- __ jmp(&loop_1_condition);
- // Loop condition: while (index < array_length).
- __ bind(&loop_1);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // elements: the FixedArray of strings we are joining.
- // scratch: array length.
-
- // Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(result_pos, string, string_length);
- __ incl(index);
- __ bind(&loop_1_condition);
- __ cmpl(index, scratch);
- __ j(less, &loop_1); // Loop while (index < array_length).
- __ jmp(&done);
-
- // Generic bailout code used from several places.
- __ bind(&bailout);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ jmp(&return_result);
-
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Get the separator ASCII character value.
- // Register "string" holds the separator.
- __ movzxbl(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ Set(index, 0);
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator.
- __ jmp(&loop_2_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_2);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // elements: The FixedArray of strings we are joining.
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // scratch: Separator character.
-
- // Copy the separator character to the result.
- __ movb(Operand(result_pos, 0), scratch);
- __ incq(result_pos);
-
- __ bind(&loop_2_entry);
- // Get string = array[index].
- __ movq(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(result_pos, string, string_length);
- __ incl(index);
- __ cmpl(index, array_length_operand);
- __ j(less, &loop_2); // End while (index < length).
- __ jmp(&done);
-
-
- // Long separator case (separator is more than one character).
- __ bind(&long_separator);
-
- // Make elements point to end of elements array, and index
- // count from -array_length to zero, so we don't need to maintain
- // a loop limit.
- __ movl(index, array_length_operand);
- __ lea(elements, FieldOperand(elements, index, times_pointer_size,
- FixedArray::kHeaderSize));
- __ neg(index);
-
- // Replace separator string with pointer to its first character, and
- // make scratch be its length.
- __ movq(string, separator_operand);
- __ SmiToInteger32(scratch,
- FieldOperand(string, String::kLengthOffset));
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ movq(separator_operand, string);
-
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator.
- __ jmp(&loop_3_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_3);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // scratch: Separator length.
- // separator_operand (rsp[0x10]): Address of first char of separator.
-
- // Copy the separator to the result.
- __ movq(string, separator_operand);
- __ movl(string_length, scratch);
- __ CopyBytes(result_pos, string, string_length, 2);
-
- __ bind(&loop_3_entry);
- // Get string = array[index].
- __ movq(string, Operand(elements, index, times_pointer_size, 0));
- __ SmiToInteger32(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(result_pos, string, string_length);
- __ incq(index);
- __ j(not_equal, &loop_3); // Loop while (index < 0).
-
- __ bind(&done);
- __ movq(rax, result_operand);
-
- __ bind(&return_result);
- // Drop temp values from the stack, and restore context register.
- __ addq(rsp, Immediate(3 * kPointerSize));
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->Plug(rax);
-}
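(At a high level, the fast join above behaves like this hedged C++ sketch; std::string stands in for V8's sequential ASCII strings, and the bailout paths to the generic join are elided.)

  #include <string>
  #include <vector>

  // Compute the total length first, allocate the result once, then copy the
  // elements with the separator in between. The generated code specializes the
  // empty, one-character and longer separator cases, which all reduce to this.
  std::string FastAsciiJoin(const std::vector<std::string>& elements,
                            const std::string& separator) {
    if (elements.empty()) return std::string();
    size_t total = separator.size() * (elements.size() - 1);
    for (const std::string& s : elements) total += s.size();
    std::string result;
    result.reserve(total);  // Single allocation, mirroring AllocateAsciiString.
    for (size_t i = 0; i < elements.size(); ++i) {
      if (i != 0) result.append(separator);
      result.append(elements[i]);
    }
    return result;
  }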
-
-
-void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
- ZoneList<Expression*>* args = expr->arguments();
-
- if (expr->is_jsruntime()) {
- // Prepare for calling JS runtime function.
- __ movq(rax, GlobalObjectOperand());
- __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
- }
-
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- if (expr->is_jsruntime()) {
- // Call the JS runtime function using a call IC.
- __ Move(rcx, expr->name());
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
- CallIC(ic, mode, expr->CallRuntimeFeedbackId());
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- } else {
- __ CallRuntime(expr->function(), arg_count);
- }
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::DELETE: {
- Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
- Property* property = expr->expression()->AsProperty();
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
-
- if (property != NULL) {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ Push(Smi::FromInt(strict_mode_flag));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(rax);
- } else if (proxy != NULL) {
- Variable* var = proxy->var();
- // Delete of an unqualified identifier is disallowed in strict mode
- // but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
- if (var->IsUnallocated()) {
- __ push(var->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- __ Push(var->name());
- __ Push(Smi::FromInt(kNonStrictMode));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(rax);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
- // Result of deleting non-global variables is false. 'this' is
- // not really a variable, though we implement it as one. The
- // subexpression does not have side effects.
- context()->Plug(var->is_this());
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(rax);
- }
- } else {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- }
- break;
- }
-
- case Token::VOID: {
- Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- VisitForEffect(expr->expression());
- context()->Plug(Heap::kUndefinedValueRootIndex);
- break;
- }
-
- case Token::NOT: {
- Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- if (context()->IsEffect()) {
- // Unary NOT has no side effects so it's only necessary to visit the
- // subexpression. Match the optimizing compiler by not branching.
- VisitForEffect(expr->expression());
- } else if (context()->IsTest()) {
- const TestContext* test = TestContext::cast(context());
- // The labels are swapped for the recursive call.
- VisitForControl(expr->expression(),
- test->false_label(),
- test->true_label(),
- test->fall_through());
- context()->Plug(test->true_label(), test->false_label());
- } else {
- // We handle value contexts explicitly rather than simply visiting
- // for control and plugging the control flow into the context,
- // because we need to prepare a pair of extra administrative AST ids
- // for the optimizing compiler.
- ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
- Label materialize_true, materialize_false, done;
- VisitForControl(expr->expression(),
- &materialize_false,
- &materialize_true,
- &materialize_true);
- __ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
- if (context()->IsAccumulatorValue()) {
- __ LoadRoot(rax, Heap::kTrueValueRootIndex);
- } else {
- __ PushRoot(Heap::kTrueValueRootIndex);
- }
- __ jmp(&done, Label::kNear);
- __ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
- if (context()->IsAccumulatorValue()) {
- __ LoadRoot(rax, Heap::kFalseValueRootIndex);
- } else {
- __ PushRoot(Heap::kFalseValueRootIndex);
- }
- __ bind(&done);
- }
- break;
- }
-
- case Token::TYPEOF: {
- Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- { StackValueContext context(this);
- VisitForTypeofValue(expr->expression());
- }
- __ CallRuntime(Runtime::kTypeof, 1);
- context()->Plug(rax);
- break;
- }
-
- case Token::ADD: {
- Comment cmt(masm_, "[ UnaryOperation (ADD)");
- VisitForAccumulatorValue(expr->expression());
- Label no_conversion;
- __ JumpIfSmi(result_register(), &no_conversion);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
- context()->Plug(result_register());
- break;
- }
-
- case Token::SUB:
- EmitUnaryOperation(expr, "[ UnaryOperation (SUB)");
- break;
-
- case Token::BIT_NOT:
- EmitUnaryOperation(expr, "[ UnaryOperation (BIT_NOT)");
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
- const char* comment) {
- // TODO(svenpanne): Allowing format strings in Comment would be nice here...
- Comment cmt(masm_, comment);
- bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
- UnaryOverwriteMode overwrite =
- can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
- UnaryOpStub stub(expr->op(), overwrite);
- // UnaryOpStub expects the argument to be in the
- // accumulator register rax.
- VisitForAccumulatorValue(expr->expression());
- SetSourcePosition(expr->position());
- CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
- expr->UnaryOperationFeedbackId());
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- SetSourcePosition(expr->position());
-
- // Invalid left-hand-sides are rewritten to have a 'throw
- // ReferenceError' as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
- // Expression can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
- Property* prop = expr->expression()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
- if (prop != NULL) {
- assign_type =
- (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
- }
-
- // Evaluate expression and get value.
- if (assign_type == VARIABLE) {
- ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
- AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy());
- } else {
- // Reserve space for result of postfix operation.
- if (expr->is_postfix() && !context()->IsEffect()) {
- __ Push(Smi::FromInt(0));
- }
- if (assign_type == NAMED_PROPERTY) {
- VisitForAccumulatorValue(prop->obj());
- __ push(rax); // Copy of receiver, needed for later store.
- EmitNamedPropertyLoad(prop);
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
- __ push(rax); // Copy of key, needed for later store.
- EmitKeyedPropertyLoad(prop);
- }
- }
-
- // We need a second deoptimization point after loading the value
- // in case evaluating the property load may have a side effect.
- if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
- } else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
- }
-
- // Call ToNumber only if operand is not a smi.
- Label no_conversion;
- __ JumpIfSmi(rax, &no_conversion, Label::kNear);
- ToNumberStub convert_stub;
- __ CallStub(&convert_stub);
- __ bind(&no_conversion);
-
- // Save result for postfix expressions.
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- // Save the result on the stack. If we have a named or keyed property
- // we store the result under the receiver that is currently on top
- // of the stack.
- switch (assign_type) {
- case VARIABLE:
- __ push(rax);
- break;
- case NAMED_PROPERTY:
- __ movq(Operand(rsp, kPointerSize), rax);
- break;
- case KEYED_PROPERTY:
- __ movq(Operand(rsp, 2 * kPointerSize), rax);
- break;
- }
- }
- }
-
- // Inline smi case if we are in a loop.
- Label done, stub_call;
- JumpPatchSite patch_site(masm_);
-
- if (ShouldInlineSmiCase(expr->op())) {
- if (expr->op() == Token::INC) {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- } else {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1));
- }
- __ j(overflow, &stub_call, Label::kNear);
- // We could eliminate this smi check if we split the code at
- // the first smi check before calling ToNumber.
- patch_site.EmitJumpIfSmi(rax, &done, Label::kNear);
-
- __ bind(&stub_call);
- // Call stub. Undo operation first.
- if (expr->op() == Token::INC) {
- __ SmiSubConstant(rax, rax, Smi::FromInt(1));
- } else {
- __ SmiAddConstant(rax, rax, Smi::FromInt(1));
- }
- }
-
- // Record position before stub call.
- SetSourcePosition(expr->position());
-
- // Call stub for +1/-1.
- __ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(1));
- BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- expr->CountBinOpFeedbackId());
- patch_site.EmitPatchInfo();
- __ bind(&done);
-
- // Store the value returned in rax.
- switch (assign_type) {
- case VARIABLE:
- if (expr->is_postfix()) {
- // Perform the assignment as if via '='.
- { EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(rax);
- }
- // For all contexts except kEffect: We have the result on
- // top of the stack.
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- // Perform the assignment as if via '='.
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
- }
- break;
- case NAMED_PROPERTY: {
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->StoreIC_Initialize()
- : isolate()->builtins()->StoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(rax);
- }
- break;
- }
- case KEYED_PROPERTY: {
- __ pop(rcx);
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
- ? isolate()->builtins()->KeyedStoreIC_Initialize()
- : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CountStoreFeedbackId());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(rax);
- }
- break;
- }
- }
-}
-
-
-void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
- VariableProxy* proxy = expr->AsVariableProxy();
- ASSERT(!context()->IsEffect());
- ASSERT(!context()->IsTest());
-
- if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
- __ Move(rcx, proxy->name());
- __ movq(rax, proxy->var()->is_qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- // Use a regular load, not a contextual load, to avoid a reference
- // error.
- CallIC(ic);
- PrepareForBailout(expr, TOS_REG);
- context()->Plug(rax);
- } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
- Label done, slow;
-
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- __ push(rsi);
- __ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
- PrepareForBailout(expr, TOS_REG);
- __ bind(&done);
-
- context()->Plug(rax);
- } else {
- // This expression cannot throw a reference error at the top level.
- VisitInDuplicateContext(expr);
- }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
- Expression* sub_expr,
- Handle<String> check) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- { AccumulatorValueContext context(this);
- VisitForTypeofValue(sub_expr);
- }
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-
- if (check->Equals(isolate()->heap()->number_string())) {
- __ JumpIfSmi(rax, if_true);
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_string())) {
- __ JumpIfSmi(rax, if_false);
- // Check for undetectable objects => false.
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
- __ j(above_equal, if_false);
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_string())) {
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- __ j(equal, if_true);
- __ CompareRoot(rax, Heap::kFalseValueRootIndex);
- Split(equal, if_true, if_false, fall_through);
- } else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_string())) {
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_string())) {
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, if_true);
- __ JumpIfSmi(rax, if_false);
- // Check for undetectable objects => true.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_string())) {
- __ JumpIfSmi(rax, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
- __ j(equal, if_true);
- __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
- Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_string())) {
- __ JumpIfSmi(rax, if_false);
- if (!FLAG_harmony_typeof) {
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- __ j(equal, if_true);
- }
- if (FLAG_harmony_symbols) {
- __ CmpObjectType(rax, SYMBOL_TYPE, rdx);
- __ j(equal, if_true);
- }
- __ CmpObjectType(rax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, rdx);
- __ j(below, if_false);
- __ CmpInstanceType(rdx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, if_false);
- // Check for undetectable objects => false.
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(zero, if_true, if_false, fall_through);
- } else {
- if (if_false != fall_through) __ jmp(if_false);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- Comment cmnt(masm_, "[ CompareOperation");
- SetSourcePosition(expr->position());
-
- // First we try a fast inlined version of the compare when one of
- // the operands is a literal.
- if (TryLiteralCompare(expr)) return;
-
- // Always perform the comparison for its control flow. Pack the result
- // into the expression's context after the comparison is performed.
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Token::Value op = expr->op();
- VisitForStackValue(expr->left());
- switch (op) {
- case Token::IN:
- VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ CompareRoot(rax, Heap::kTrueValueRootIndex);
- Split(equal, if_true, if_false, fall_through);
- break;
-
- case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
- // The stub returns 0 for true.
- Split(zero, if_true, if_false, fall_through);
- break;
- }
-
- default: {
- VisitForAccumulatorValue(expr->right());
- Condition cc = CompareIC::ComputeCondition(op);
- __ pop(rdx);
-
- bool inline_smi_code = ShouldInlineSmiCase(op);
- JumpPatchSite patch_site(masm_);
- if (inline_smi_code) {
- Label slow_case;
- __ movq(rcx, rdx);
- __ or_(rcx, rax);
- patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
- Split(cc, if_true, if_false, NULL);
- __ bind(&slow_case);
- }
-
- // Record position and call the compare IC.
- SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
- patch_site.EmitPatchInfo();
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
- Split(cc, if_true, if_false, fall_through);
- }
- }
-
- // Convert the result of the comparison into one expected for this
- // expression's context.
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
- Expression* sub_expr,
- NilValue nil) {
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- VisitForAccumulatorValue(sub_expr);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ CompareRoot(rax, nil_value);
- if (expr->op() == Token::EQ_STRICT) {
- Split(equal, if_true, if_false, fall_through);
- } else {
- Heap::RootListIndex other_nil_value = nil == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- __ j(equal, if_true);
- __ CompareRoot(rax, other_nil_value);
- __ j(equal, if_true);
- __ JumpIfSmi(rax, if_false);
- // It can be an undetectable object.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- Split(not_zero, if_true, if_false, fall_through);
- }
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(rax);
-}
-
-
-Register FullCodeGenerator::result_register() {
- return rax;
-}
-
-
-Register FullCodeGenerator::context_register() {
- return rsi;
-}
-
-
-void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- ASSERT(IsAligned(frame_offset, kPointerSize));
- __ movq(Operand(rbp, frame_offset), value);
-}
-
-
-void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ movq(dst, ContextOperand(rsi, context_index));
-}
-
-
-void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* declaration_scope = scope()->DeclarationScope();
- if (declaration_scope->is_global_scope() ||
- declaration_scope->is_module_scope()) {
- // Contexts nested in the native context have a canonical empty function
- // as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ Push(Smi::FromInt(0));
- } else if (declaration_scope->is_eval_scope()) {
- // Contexts created by a call to eval have the same closure as the
- // context calling eval, not the anonymous closure containing the eval
- // code. Fetch it from the context.
- __ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
- } else {
- ASSERT(declaration_scope->is_function_scope());
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-
-void FullCodeGenerator::EnterFinallyBlock() {
- ASSERT(!result_register().is(rdx));
- ASSERT(!result_register().is(rcx));
- // Cook return address on top of stack (smi encoded Code* delta)
- __ pop(rdx);
- __ Move(rcx, masm_->CodeObject());
- __ subq(rdx, rcx);
- __ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
-
- // Store result register while executing finally block.
- __ push(result_register());
-
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ Load(rdx, pending_message_obj);
- __ push(rdx);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ Load(rdx, has_pending_message);
- __ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ Load(rdx, pending_message_script);
- __ push(rdx);
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASSERT(!result_register().is(rdx));
- ASSERT(!result_register().is(rcx));
- // Restore pending message from stack.
- __ pop(rdx);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ Store(pending_message_script, rdx);
-
- __ pop(rdx);
- __ SmiToInteger32(rdx, rdx);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ Store(has_pending_message, rdx);
-
- __ pop(rdx);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ Store(pending_message_obj, rdx);
-
- // Restore result register from stack.
- __ pop(result_register());
-
- // Uncook return address.
- __ pop(rdx);
- __ SmiToInteger32(rdx, rdx);
- __ Move(rcx, masm_->CodeObject());
- __ addq(rdx, rcx);
- __ jmp(rdx);
-}
-
-
-#undef __
-
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ movq(rsi, Operand(rsp, StackHandlerConstants::kContextOffset));
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
- }
- __ PopTryHandler();
- __ call(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/ic-x64.cc b/src/3rdparty/v8/src/x64/ic-x64.cc
deleted file mode 100644
index 15423e4..0000000
--- a/src/3rdparty/v8/src/x64/ic-x64.cc
+++ /dev/null
@@ -1,1690 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen.h"
-#include "ic-inl.h"
-#include "runtime.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
- Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
- __ j(equal, global_object);
- __ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
- __ j(equal, global_object);
- __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
- __ j(equal, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register r0,
- Register r1,
- Label* miss) {
- // Register usage:
- // receiver: holds the receiver on entry and is unchanged.
- // r0: used to hold receiver instance type.
- // Holds the property dictionary on fall through.
- //   r1: used to hold receiver's map.
-
- __ JumpIfSmi(receiver, miss);
-
- // Check that the receiver is a valid JS object.
- __ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
- __ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
- __ j(below, miss);
-
- // If this assert fails, we have to check upper bound too.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
- GenerateGlobalInstanceTypeCheck(masm, r0, miss);
-
- // Check for non-global object that requires access check.
- __ testb(FieldOperand(r1, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasNamedInterceptor)));
- __ j(not_zero, miss);
-
- __ movq(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, miss);
-}
-
-
-
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if name is not an internalized string,
-// and will jump to the miss_label in that case.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register r0,
- Register r1,
- Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // r0 - used to hold the capacity of the property dictionary.
- //
- // r1 - used to hold the index into the property dictionary.
- //
- // result - holds the result on exit if the load succeeded.
-
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- r0,
- r1);
-
- // If probing finds an entry in the dictionary, r1 contains the
- // index into the dictionary. Check that the value is a normal
- // property.
- __ bind(&done);
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ Test(Operand(elements, r1, times_pointer_size,
- kDetailsOffset - kHeapObjectTag),
- Smi::FromInt(PropertyDetails::TypeField::kMask));
- __ j(not_zero, miss_label);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(result,
- Operand(elements, r1, times_pointer_size,
- kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not an internalized string, and will jump to the miss_label
-// in that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
- Label* miss_label,
- Register elements,
- Register name,
- Register value,
- Register scratch0,
- Register scratch1) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is clobbered.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // value - holds the value to store and is unchanged.
- //
- // scratch0 - used during the positive dictionary lookup and is clobbered.
- //
- // scratch1 - used for index into the property dictionary and is clobbered.
- Label done;
-
- // Probe the dictionary.
- StringDictionaryLookupStub::GeneratePositiveLookup(masm,
- miss_label,
- &done,
- elements,
- name,
- scratch0,
- scratch1);
-
- // If probing finds an entry in the dictionary, scratch0 contains the
- // index into the dictionary. Check that the value is a normal
- // property that is not read only.
- __ bind(&done);
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ Test(Operand(elements,
- scratch1,
- times_pointer_size,
- kDetailsOffset - kHeapObjectTag),
- Smi::FromInt(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label);
-
- // Store the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(scratch1, Operand(elements,
- scratch1,
- times_pointer_size,
- kValueOffset - kHeapObjectTag));
- __ movq(Operand(scratch1, 0), value);
-
- // Update write barrier. Make sure not to clobber the value.
- __ movq(scratch0, value);
- __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map,
- int interceptor_bit,
- Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing
- // into string objects works as intended.
- ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
- __ j(below, slow);
-
- // Check bit field.
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << interceptor_bit)));
- __ j(not_zero, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
- Register receiver,
- Register key,
- Register elements,
- Register scratch,
- Register result,
- Label* not_fast_array,
- Label* out_of_range) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // elements - holds the elements of the receiver on exit.
- //
- // result - holds the result on exit if the load succeeded.
- //            Allowed to be the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // scratch - used to hold elements of the receiver and the loaded value.
-
- __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- if (not_fast_array != NULL) {
- // Check that the object is in fast mode and writable.
- __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, not_fast_array);
- } else {
- __ AssertFastElements(elements);
- }
- // Check that the key (index) is within bounds.
- __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
- // Unsigned comparison rejects negative indices.
- __ j(above_equal, out_of_range);
- // Fast case: Do the load.
- SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
- __ movq(scratch, FieldOperand(elements,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, out_of_range);
- if (!result.is(scratch)) {
- __ movq(result, scratch);
- }
-}
-
-
-// Checks whether a key is an array index string or an internalized string.
-// Falls through if the key is an internalized string.
-static void GenerateKeyStringCheck(MacroAssembler* masm,
- Register key,
- Register map,
- Register hash,
- Label* index_string,
- Label* not_internalized) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
- __ j(above_equal, not_internalized);
- // Is the string an array index, with cached numeric value?
- __ movl(hash, FieldOperand(key, String::kHashFieldOffset));
- __ testl(hash, Immediate(String::kContainsCachedArrayIndexMask));
- __ j(zero, index_string); // The value in hash is used at jump target.
-
- // Is the string internalized?
- STATIC_ASSERT(kInternalizedTag != 0);
- __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsInternalizedMask));
- __ j(zero, not_internalized);
-}
-
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, check_string, index_smi, index_string, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &check_string);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(rcx, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm,
- rdx,
- rax,
- rcx,
- rbx,
- rax,
- NULL,
- &slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-
- // Check whether the elements object is a number dictionary.
- // rdx: receiver
- // rax: key
- // rbx: key as untagged int32
- // rcx: elements
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &slow);
- __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
- __ ret(0);
-
- __ bind(&slow);
- // Slow case: Jump to runtime.
- // rdx: receiver
- // rax: key
- __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow);
-
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rcx, Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the keyed lookup
- // cache. Otherwise probe the dictionary leaving result in rcx.
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &probe_dictionary);
-
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ movl(rcx, rbx);
- __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
- __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
- __ shr(rdi, Immediate(String::kHashShift));
- __ xor_(rcx, rdi);
- int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
- __ and_(rcx, Immediate(mask));
-
- // Load the key (consisting of map and internalized string) from the cache and
- // check for match.
- Label load_in_object_property;
- static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
- Label hit_on_nth_entry[kEntriesPerBucket];
- ExternalReference cache_keys
- = ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
- for (int i = 0; i < kEntriesPerBucket - 1; i++) {
- Label try_next_entry;
- __ movq(rdi, rcx);
- __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
- __ LoadAddress(kScratchRegister, cache_keys);
- int off = kPointerSize * i * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
- __ j(not_equal, &try_next_entry);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
- __ j(equal, &hit_on_nth_entry[i]);
- __ bind(&try_next_entry);
- }
-
- int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
- __ j(not_equal, &slow);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
- __ j(not_equal, &slow);
-
- // Get field offset, which is a 32-bit integer.
- ExternalReference cache_field_offsets
- = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
- // Hit on nth entry.
- for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
- __ bind(&hit_on_nth_entry[i]);
- if (i != 0) {
- __ addl(rcx, Immediate(i));
- }
- __ LoadAddress(kScratchRegister, cache_field_offsets);
- __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ subq(rdi, rcx);
- __ j(above_equal, &property_array_property);
- if (i != 0) {
- __ jmp(&load_in_object_property);
- }
- }
-
- // Load in-object property.
- __ bind(&load_in_object_property);
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addq(rcx, rdi);
- __ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Load property array property.
- __ bind(&property_array_property);
- __ movq(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ movq(rax, FieldOperand(rax, rdi, times_pointer_size,
- FixedArray::kHeaderSize));
- __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
- __ ret(0);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // rdx: receiver
- // rax: key
- // rbx: elements
-
- __ movq(rcx, FieldOperand(rdx, JSObject::kMapOffset));
- __ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
-
- GenerateDictionaryLoad(masm, &slow, rbx, rax, rcx, rdi, rax);
- __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_string);
- __ IndexFromHash(rbx, rax);
- __ jmp(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Register receiver = rdx;
- Register index = rax;
- Register scratch = rcx;
- Register result = rax;
-
- StringCharAtGenerator char_at_generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- &miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX);
- char_at_generator.GenerateFast(masm);
- __ ret(0);
-
- StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
-
- __ bind(&miss);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &slow);
-
- // Check that the key is an array index, that is Uint32.
- STATIC_ASSERT(kSmiValueSize <= 32);
- __ JumpUnlessNonNegativeSmi(rax, &slow);
-
- // Get the map of the receiver.
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
-
- // Check that it has indexed interceptor and access checks
- // are not enabled for this object.
- __ movb(rcx, FieldOperand(rcx, Map::kBitFieldOffset));
- __ andb(rcx, Immediate(kSlowCaseBitFieldMask));
- __ cmpb(rcx, Immediate(1 << Map::kHasIndexedInterceptor));
- __ j(not_zero, &slow);
-
- // Everything is fine, call runtime.
- __ pop(rcx);
- __ push(rdx); // receiver
- __ push(rax); // key
- __ push(rcx); // return address
-
- // Perform tail call to the entry.
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
- masm->isolate()),
- 2,
- 1);
-
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
- MacroAssembler* masm,
- Label* fast_object,
- Label* fast_double,
- Label* slow,
- KeyedStoreCheckMap check_map,
- KeyedStoreIncrementLength increment_length) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- // rax: value
- // rbx: receiver's elements array (a FixedArray)
- // rcx: index
- // rdx: receiver (a JSArray)
- // r9: map of receiver
- if (check_map == kCheckMap) {
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, fast_double);
- }
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(rax, &non_smi_value);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ leal(rdi, Operand(rcx, 1));
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- }
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ ret(0);
-
- __ bind(&non_smi_value);
- // Writing a non-smi, check whether array allows non-smi elements.
- // r9: receiver's map
- __ CheckFastObjectElements(r9, &transition_smi_elements);
-
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ leal(rdi, Operand(rcx, 1));
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- }
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ movq(rdx, rax); // Preserve the value which is returned.
- __ RecordWriteArray(
- rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ ret(0);
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- // rdi: elements array's map
- __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ j(not_equal, slow);
- }
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ leal(rdi, Operand(rcx, 1));
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- }
- __ ret(0);
-
- __ bind(&transition_smi_elements);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-
- // Transition the array appropriately depending on the value type.
- __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- rbx,
- rdi,
- slow);
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- slow);
- mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
- slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
- // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- slow);
- mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow_with_tagged_index);
- // Get the map from the receiver.
- __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(r9, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow_with_tagged_index);
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
- __ SmiToInteger32(rcx, rcx);
-
- __ CmpInstanceType(r9, JS_ARRAY_TYPE);
- __ j(equal, &array);
- // Check that the object is some kind of JSObject.
- __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
- __ j(below, &slow);
-
- // Object case: Check key against length in the elements array.
- // rax: value
- // rdx: JSObject
- // rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check array bounds.
- __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
- // rax: value
- // rbx: FixedArray
- // rcx: index
- __ j(above, &fast_object);
-
- // Slow case: call runtime.
- __ bind(&slow);
- __ Integer32ToSmi(rcx, rcx);
- __ bind(&slow_with_tagged_index);
- GenerateRuntimeSetProperty(masm, strict_mode);
- // Never returns to here.
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // rax: value
- // rdx: receiver (a JSArray)
- // rbx: receiver's elements array (a FixedArray)
- // rcx: index
- // flags: smicompare (rdx.length(), rcx)
- __ j(not_equal, &slow); // do not leave holes in the array
- __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
- __ j(below_equal, &slow);
- // Increment index to get new length.
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- // rdi: elements array's map
- __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ j(not_equal, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is, the length is always a smi.
- __ bind(&array);
- // rax: value
- // rdx: receiver (a JSArray)
- // rcx: index
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-
- // Check the key against the length in the array, compute the
- // address to store into and fall through to fast case.
- __ SmiCompareInteger32(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
- __ j(below_equal, &extra);
-
- KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
- &slow, kCheckMap, kDontIncrementLength);
- KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
- &slow, kDontCheckMap, kIncrementLength);
-}
-
-
-// The generated code does not accept smi keys.
-// The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
- int argc,
- Code::Kind kind,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rdx : receiver
- // -----------------------------------
- Label number, non_number, non_string, boolean, probe, miss;
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(kind,
- MONOMORPHIC,
- extra_state,
- Code::NORMAL,
- argc);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- rax);
-
- // If the stub cache probing failed, the receiver might be a value.
- // For value objects, we use the map of the prototype objects for
- // the corresponding JSValue for the cache and that is what we need
- // to probe.
- //
- // Check for number.
- __ JumpIfSmi(rdx, &number);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
- __ j(not_equal, &non_number);
- __ bind(&number);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::NUMBER_FUNCTION_INDEX, rdx);
- __ jmp(&probe);
-
- // Check for string.
- __ bind(&non_number);
- __ CmpInstanceType(rbx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &non_string);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::STRING_FUNCTION_INDEX, rdx);
- __ jmp(&probe);
-
- // Check for boolean.
- __ bind(&non_string);
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &boolean);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&boolean);
- StubCompiler::GenerateLoadGlobalFunctionPrototype(
- masm, Context::BOOLEAN_FUNCTION_INDEX, rdx);
-
- // Probe the stub cache for the value object.
- __ bind(&probe);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
-
- __ bind(&miss);
-}
-
-
-static void GenerateFunctionTailCall(MacroAssembler* masm,
- int argc,
- Label* miss) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rdi : function
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- __ JumpIfSmi(rdi, miss);
- // Check that the value is a JavaScript function.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx);
- __ j(not_equal, miss);
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ InvokeFunction(rdi, actual, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
-}
-
-
-// The generated code falls through if the call should be handled by runtime.
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
-
- // Get the receiver of the function from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- GenerateStringDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
-
- // rax: elements
- // Search the dictionary placing the result in rdi.
- GenerateDictionaryLoad(masm, &miss, rax, rcx, rbx, rdi, rdi);
-
- GenerateFunctionTailCall(masm, argc, &miss);
-
- __ bind(&miss);
-}
-
-
-void CallICBase::GenerateMiss(MacroAssembler* masm,
- int argc,
- IC::UtilityId id,
- Code::ExtraICState extra_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- if (id == IC::kCallIC_Miss) {
- __ IncrementCounter(counters->call_miss(), 1);
- } else {
- __ IncrementCounter(counters->keyed_call_miss(), 1);
- }
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Push the receiver and the name of the function.
- __ push(rdx);
- __ push(rcx);
-
- // Call the entry.
- CEntryStub stub(1);
- __ Set(rax, 2);
- __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
- __ CallStub(&stub);
-
- // Move result to rdi and exit the internal frame.
- __ movq(rdi, rax);
- }
-
- // Check if the receiver is a global object of some sort.
- // This can happen only for regular CallIC but not KeyedCallIC.
- if (id == IC::kCallIC_Miss) {
- Label invoke, global;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
- __ JumpIfSmi(rdx, &invoke);
- __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
- __ j(equal, &global);
- __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
- __ j(not_equal, &invoke);
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- __ bind(&invoke);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- ParameterCount actual(argc);
- __ InvokeFunction(rdi,
- actual,
- JUMP_FUNCTION,
- NullCallWrapper(),
- call_kind);
-}
-
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm,
- int argc,
- Code::ExtraICState extra_ic_state) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
- GenerateMiss(masm, argc, extra_ic_state);
-}
-
-
-void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // Get the receiver of the function from the stack; 1 ~ return address.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- Label do_call, slow_call, slow_load;
- Label check_number_dictionary, check_string, lookup_monomorphic_cache;
- Label index_smi, index_string;
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &check_string);
-
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rax, Map::kHasIndexedInterceptor, &slow_call);
-
- GenerateFastArrayLoad(
- masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1);
-
- __ bind(&do_call);
- // receiver in rdx is not used after this point.
- // rcx: key
- // rdi: function
- GenerateFunctionTailCall(masm, argc, &slow_call);
-
- __ bind(&check_number_dictionary);
- // rax: elements
- // rcx: smi key
- // Check whether the elements object is a number dictionary.
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &slow_load);
- __ SmiToInteger32(rbx, rcx);
- // rbx: untagged index
- __ LoadFromNumberDictionary(&slow_load, rax, rcx, rbx, r9, rdi, rdi);
- __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&slow_load);
- // This branch is taken when calling KeyedCallIC_Miss is neither required
- // nor beneficial.
- __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rcx); // save the key
- __ push(rdx); // pass the receiver
- __ push(rcx); // pass the key
- __ CallRuntime(Runtime::kKeyedGetProperty, 2);
- __ pop(rcx); // restore the key
- }
- __ movq(rdi, rax);
- __ jmp(&do_call);
-
- __ bind(&check_string);
- GenerateKeyStringCheck(masm, rcx, rax, rbx, &index_string, &slow_call);
-
- // The key is known to be an internalized string.
- // If the receiver is a regular JS object with slow properties then do
- // a quick inline probe of the receiver's dictionary.
- // Otherwise do the monomorphic cache probe.
- GenerateKeyedLoadReceiverCheck(
- masm, rdx, rax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
-
- __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &lookup_monomorphic_cache);
-
- GenerateDictionaryLoad(masm, &slow_load, rbx, rcx, rax, rdi, rdi);
- __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1);
- __ jmp(&do_call);
-
- __ bind(&lookup_monomorphic_cache);
- __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
- GenerateMonomorphicCacheProbe(masm,
- argc,
- Code::KEYED_CALL_IC,
- Code::kNoExtraICState);
- // Fall through on miss.
-
- __ bind(&slow_call);
- // This branch is taken if:
- // - the receiver requires boxing or access check,
- // - the key is neither smi nor internalized string,
- // - the value loaded is not a function,
- // - there is hope that the runtime will create a monomorphic call stub
- // that will get fetched next time.
- __ IncrementCounter(counters->keyed_call_generic_slow(), 1);
- GenerateMiss(masm, argc);
-
- __ bind(&index_string);
- __ IndexFromHash(rbx, rcx);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
-void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- // Check if the name is a string.
- Label miss;
- __ JumpIfSmi(rcx, &miss);
- Condition cond = masm->IsObjectStringType(rcx, rax, rax);
- __ j(NegateCondition(cond), &miss);
- CallICBase::GenerateNormal(masm, argc);
- __ bind(&miss);
- GenerateMiss(masm, argc);
-}
-
-
-static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
- Register object,
- Register key,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* unmapped_case,
- Label* slow_case) {
- Heap* heap = masm->isolate()->heap();
-
- // Check that the receiver is a JSObject. Because of the elements
- // map check later, we do not need to check for interceptors or
- // whether it requires access checks.
- __ JumpIfSmi(object, slow_case);
- // Check that the object is some kind of JSObject.
- __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
- __ j(below, slow_case);
-
- // Check that the key is a positive smi.
- Condition check = masm->CheckNonNegativeSmi(key);
- __ j(NegateCondition(check), slow_case);
-
- // Load the elements into scratch1 and check its map. If not, jump
- // to the unmapped lookup with the parameter map in scratch1.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
- __ movq(scratch1, FieldOperand(object, JSObject::kElementsOffset));
- __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
- // Check if element is in the range of mapped arguments.
- __ movq(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
- __ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
- __ cmpq(key, scratch2);
- __ j(greater_equal, unmapped_case);
-
- // Load element index and check whether it is the hole.
- const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
- __ SmiToInteger64(scratch3, key);
- __ movq(scratch2, FieldOperand(scratch1,
- scratch3,
- times_pointer_size,
- kHeaderSize));
- __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ j(equal, unmapped_case);
-
- // Load value from context and return it. We can reuse scratch1 because
- // we do not jump to the unmapped lookup (which requires the parameter
- // map in scratch1).
- __ movq(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
- __ SmiToInteger64(scratch3, scratch2);
- return FieldOperand(scratch1,
- scratch3,
- times_pointer_size,
- Context::kHeaderSize);
-}
-
-
-static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
- Register key,
- Register parameter_map,
- Register scratch,
- Label* slow_case) {
- // Element is in arguments backing store, which is referenced by the
- // second element of the parameter_map. The parameter_map register
- // must be loaded with the parameter map of the arguments object and is
- // overwritten.
- const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
- Register backing_store = parameter_map;
- __ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
- Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
- __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
- __ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmpq(key, scratch);
- __ j(greater_equal, slow_case);
- __ SmiToInteger64(scratch, key);
- return FieldOperand(backing_store,
- scratch,
- times_pointer_size,
- FixedArray::kHeaderSize);
-}
-
-
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, notin;
- Operand mapped_location =
- GenerateMappedArgumentsLookup(
- masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
- __ movq(rax, mapped_location);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
- __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
- __ j(equal, &slow);
- __ movq(rax, unmapped_location);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, notin;
- Operand mapped_location = GenerateMappedArgumentsLookup(
- masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
- __ movq(mapped_location, rax);
- __ lea(r9, mapped_location);
- __ movq(r8, rax);
- __ RecordWrite(rbx,
- r9,
- r8,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- __ Ret();
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
- __ movq(unmapped_location, rax);
- __ lea(r9, unmapped_location);
- __ movq(r8, rax);
- __ RecordWrite(rbx,
- r9,
- r8,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- __ Ret();
- __ bind(&slow);
- GenerateMiss(masm, MISS);
-}
-
-
-void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
- int argc) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label slow, notin;
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
- Operand mapped_location = GenerateMappedArgumentsLookup(
- masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
- __ movq(rdi, mapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&notin);
- // The unmapped lookup expects that the parameter map is in rbx.
- Operand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rax, &slow);
- __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
- __ j(equal, &slow);
- __ movq(rdi, unmapped_location);
- GenerateFunctionTailCall(masm, argc, &slow);
- __ bind(&slow);
- GenerateMiss(masm, argc);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::LOAD_IC, MONOMORPHIC, Code::HANDLER_FRAGMENT);
- Isolate::Current()->stub_cache()->GenerateProbe(
- masm, flags, rax, rcx, rbx, rdx);
-
- // Cache miss: Jump to runtime.
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
-
- // rdx: elements
- // Search the dictionary placing the result in rax.
- GenerateDictionaryLoad(masm, &miss, rdx, rcx, rbx, rdi, rax);
- __ ret(0);
-
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->load_miss(), 1);
-
- __ pop(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_miss(), 1);
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
- no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // name
- __ push(rax); // value
- __ push(rbx); // return address
-
- // Perform tail call to the entry.
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Label miss;
-
- GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
-
- GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1);
- __ ret(0);
-
- __ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1);
- GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ pop(rbx);
- __ push(rdx);
- __ push(rcx);
- __ push(rax);
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ Push(Smi::FromInt(strict_mode));
- __ push(rbx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ Push(Smi::FromInt(strict_mode)); // Strict mode.
- __ push(rbx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
- __ push(rbx); // return address
-
- // Do tail-call to runtime routine.
- ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- __ pop(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
- __ push(rbx); // return address
-
- // Do tail-call to runtime routine.
- ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
- ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
- masm->isolate())
- : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
- __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rbx : target map
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- // Must return the modified receiver in rax.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
- __ movq(rax, rdx);
- __ Ret();
- __ bind(&fail);
- }
-
- __ pop(rbx);
- __ push(rdx);
- __ push(rbx); // return address
- __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rbx : target map
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- // Must return the modified receiver in rax.
- if (!FLAG_trace_elements_transitions) {
- Label fail;
- AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
- __ movq(rax, rdx);
- __ Ret();
- __ bind(&fail);
- }
-
- __ pop(rbx);
- __ push(rdx);
- __ push(rbx); // return address
- __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return equal;
- case Token::LT:
- return less;
- case Token::GT:
- return greater;
- case Token::LTE:
- return less_equal;
- case Token::GTE:
- return greater_equal;
- default:
- UNREACHABLE();
- return no_condition;
- }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al instruction,
- // nothing was inlined.
- return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
- // The address of the instruction following the call.
- Address test_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
-
- // If the instruction following the call is not a test al instruction,
- // nothing was inlined.
- if (*test_instruction_address != Assembler::kTestAlByte) {
- ASSERT(*test_instruction_address == Assembler::kNopByte);
- return;
- }
-
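- // Layout of the patched code (illustration only):
- //   j<cc> short <target>    ; at test_instruction_address - delta
- //   ...
- //   call <ic entry>
- //   test al, <delta>        ; kTestAlByte marker, then the delta byte
- // Only the condition byte of that short jump is rewritten below.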
- Address delta_address = test_instruction_address + 1;
- // The delta to the start of the map check instruction and the
- // condition code used at the patched jump.
- int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
- if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n",
- address, test_instruction_address, delta);
- }
-
- // Patch with a short conditional jump. Enabling means switching from a short
- // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
- // reverse operation of that.
- Address jmp_address = test_instruction_address - delta;
- ASSERT((check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ||
- *jmp_address == Assembler::kJcShortOpcode)
- : (*jmp_address == Assembler::kJnzShortOpcode ||
- *jmp_address == Assembler::kJzShortOpcode));
- Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
- ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
- : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
- *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc b/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
deleted file mode 100644
index f1fe452..0000000
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
+++ /dev/null
@@ -1,5846 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "x64/lithium-codegen-x64.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// When invoking builtins, we need to record the safepoint in the middle of
-// the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator : public CallWrapper {
- public:
- SafepointGenerator(LCodeGen* codegen,
- LPointerMap* pointers,
- Safepoint::DeoptMode mode)
- : codegen_(codegen),
- pointers_(pointers),
- deopt_mode_(mode) { }
- virtual ~SafepointGenerator() { }
-
- virtual void BeforeCall(int call_size) const {
- codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
- }
-
- virtual void AfterCall() const {
- codegen_->RecordSafepoint(pointers_, deopt_mode_);
- }
-
- private:
- LCodeGen* codegen_;
- LPointerMap* pointers_;
- Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
- HPhase phase("Z_Code generation", chunk());
- ASSERT(is_unused());
- status_ = GENERATING;
-
- // Open a frame scope to indicate that there is a frame on the stack. The
- // MANUAL indicates that the scope shouldn't actually generate code to set up
- // the frame (that is done in GeneratePrologue).
- FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
- return GeneratePrologue() &&
- GenerateBody() &&
- GenerateDeferredCode() &&
- GenerateJumpTable() &&
- GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
- ASSERT(is_done());
- code->set_stack_slots(GetStackSlotCount());
- code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- if (FLAG_weak_embedded_maps_in_optimized_code) {
- RegisterDependentCodeForEmbeddedMaps(code);
- }
- PopulateDeoptimizationData(code);
- for (int i = 0 ; i < prototype_maps_.length(); i++) {
- prototype_maps_.at(i)->AddDependentCode(
- DependentCode::kPrototypeCheckGroup, code);
- }
-}
-
-
-void LChunkBuilder::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-void LCodeGen::Comment(const char* format, ...) {
- if (!FLAG_code_comments) return;
- char buffer[4 * KB];
- StringBuilder builder(buffer, ARRAY_SIZE(buffer));
- va_list arguments;
- va_start(arguments, format);
- builder.AddFormattedList(format, arguments);
- va_end(arguments);
-
- // Copy the string before recording it in the assembler to avoid
- // issues when the stack allocated buffer goes out of scope.
- int length = builder.position();
- Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
- masm()->RecordComment(copy.start());
-}
-
-
-bool LCodeGen::GeneratePrologue() {
- ASSERT(is_generating());
-
- if (info()->IsOptimizing()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). rcx is zero for method calls and non-zero for function
- // calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
- __ bind(&ok);
- }
- }
-
- info()->set_prologue_offset(masm_->pc_offset());
- if (NeedsEagerFrame()) {
- ASSERT(!frame_is_built_);
- frame_is_built_ = true;
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- if (info()->IsStub()) {
- __ Push(Smi::FromInt(StackFrame::STUB));
- } else {
- __ push(rdi); // Callee's JS function.
- }
- }
-
- // Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
- if (slots > 0) {
- if (FLAG_debug_code) {
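- // In debug builds, fill every freshly reserved spill slot with
- // kSlotsZapValue so uninitialized slots are easy to spot when
- // inspecting generated code.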
- __ subq(rsp, Immediate(slots * kPointerSize));
- __ push(rax);
- __ Set(rax, slots);
- __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
- Label loop;
- __ bind(&loop);
- __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
- kScratchRegister);
- __ decl(rax);
- __ j(not_zero, &loop);
- __ pop(rax);
- } else {
- __ subq(rsp, Immediate(slots * kPointerSize));
-#ifdef _MSC_VER
- // On windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write to each page in turn (the value is irrelevant).
- const int kPageSize = 4 * KB;
- for (int offset = slots * kPointerSize - kPageSize;
- offset > 0;
- offset -= kPageSize) {
- __ movq(Operand(rsp, offset), rax);
- }
-#endif
- }
-
- if (info()->saves_caller_doubles()) {
- Comment(";;; Save clobbered callee double registers");
- int count = 0;
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- while (!save_iterator.Done()) {
- __ movsd(MemOperand(rsp, count * kDoubleSize),
- XMMRegister::FromAllocationIndex(save_iterator.Current()));
- save_iterator.Advance();
- count++;
- }
- }
- }
-
- // Possibly allocate a local context.
- int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0 ||
- (scope() != NULL && scope()->is_qml_mode() && scope()->is_global_scope())) {
- Comment(";;; Allocate local context");
- // Argument to NewContext is the function, which is still in rdi.
- __ push(rdi);
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
- }
- RecordSafepoint(Safepoint::kNoLazyDeopt);
- // Context is returned in both rax and rsi. It replaces the context
- // passed to us. It's saved in the stack and kept live in rsi.
- __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
-
- // Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- for (int i = 0; i < num_parameters; i++) {
- Variable* var = scope()->parameter(i);
- if (var->IsContextSlot()) {
- int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
- // Load parameter from stack.
- __ movq(rax, Operand(rbp, parameter_offset));
- // Store it in the context.
- int context_offset = Context::SlotOffset(var->index());
- __ movq(Operand(rsi, context_offset), rax);
- // Update the write barrier. This clobbers rax and rbx.
- __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
- }
- }
- Comment(";;; End allocate local context");
- }
-
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateBody() {
- ASSERT(is_generating());
- bool emit_instructions = true;
- for (current_instruction_ = 0;
- !is_aborted() && current_instruction_ < instructions_->length();
- current_instruction_++) {
- LInstruction* instr = instructions_->at(current_instruction_);
- if (instr->IsLabel()) {
- LLabel* label = LLabel::cast(instr);
- emit_instructions = !label->HasReplacement();
- }
-
- if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. <#%d>", current_instruction_,
- instr->Mnemonic(), hydrogen->id());
- }
- } else {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- }
- }
- instr->CompileToNative(this);
- }
- }
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateJumpTable() {
- Label needs_frame_not_call;
- Label needs_frame_is_call;
- for (int i = 0; i < jump_table_.length(); i++) {
- __ bind(&jump_table_[i].label);
- Address entry = jump_table_[i].address;
- bool is_lazy_deopt = jump_table_[i].is_lazy_deopt;
- Deoptimizer::BailoutType type =
- is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
- int id = Deoptimizer::GetDeoptimizationId(entry, type);
- if (id == Deoptimizer::kNotDeoptimizationEntry) {
- Comment(";;; jump table entry %d.", i);
- } else {
- Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
- }
- if (jump_table_[i].needs_frame) {
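- // No frame was built for this code (needs_frame is only set when
- // frame_is_built_ was false), so materialize one before entering
- // the deoptimizer.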
- __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
- if (is_lazy_deopt) {
- if (needs_frame_is_call.is_bound()) {
- __ jmp(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
- __ movq(rsi, MemOperand(rsp, kPointerSize));
- __ call(kScratchRegister);
- }
- } else {
- if (needs_frame_not_call.is_bound()) {
- __ jmp(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
- __ movq(rsi, MemOperand(rsp, kPointerSize));
- __ jmp(kScratchRegister);
- }
- }
- } else {
- if (is_lazy_deopt) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- }
- }
- }
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
- ASSERT(is_generating());
- if (deferred_.length() > 0) {
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred build frame @%d: %s.",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(!frame_is_built_);
- ASSERT(info()->IsStub());
- frame_is_built_ = true;
- // Build the frame in such a way that rsi isn't trashed.
- __ push(rbp); // Caller's frame pointer.
- __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
- __ Push(Smi::FromInt(StackFrame::STUB));
- __ lea(rbp, Operand(rsp, 2 * kPointerSize));
- }
- Comment(";;; Deferred code @%d: %s.",
- code->instruction_index(),
- code->instr()->Mnemonic());
- code->Generate();
- if (NeedsDeferredFrame()) {
- Comment(";;; Deferred destroy frame @%d: %s.",
- code->instruction_index(),
- code->instr()->Mnemonic());
- ASSERT(frame_is_built_);
- frame_is_built_ = false;
- __ movq(rsp, rbp);
- __ pop(rbp);
- }
- __ jmp(code->exit());
- }
- }
-
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
- if (!is_aborted()) status_ = DONE;
- return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
- ASSERT(is_done());
- safepoints_.Emit(masm(), GetStackSlotCount());
- return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
- return Register::FromAllocationIndex(index);
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(int index) const {
- return XMMRegister::FromAllocationIndex(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
- ASSERT(op->IsRegister());
- return ToRegister(op->index());
-}
-
-
-XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
- ASSERT(op->IsDoubleRegister());
- return ToDoubleRegister(op->index());
-}
-
-
-bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsInteger32();
-}
-
-
-bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsTagged();
-}
-
-
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(constant->HasDoubleValue());
- return constant->DoubleValue();
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
- HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
- return constant->handle();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) const {
- // Does not handle registers. In X64 assembler, plain registers are not
- // representable as an Operand.
- ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- return Operand(rbp, StackSlotOffset(op->index()));
-}
-
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* pushed_arguments_index,
- int* pushed_arguments_count) {
- if (environment == NULL) return;
-
- // The translation includes one command per value in the environment.
- int translation_size = environment->values()->length();
- // The output frame height does not include the parameters.
- int height = translation_size - environment->parameter_count();
-
- // Function parameters are arguments to the outermost environment. The
- // arguments index points to the first element of a sequence of tagged
- // values on the stack that represent the arguments. This needs to be
- // kept in sync with the LArgumentsElements implementation.
- *pushed_arguments_index = -environment->parameter_count();
- *pushed_arguments_count = environment->parameter_count();
-
- WriteTranslation(environment->outer(),
- translation,
- pushed_arguments_index,
- pushed_arguments_count);
- bool has_closure_id = !info()->closure().is_null() &&
- *info()->closure() != *environment->closure();
- int closure_id = has_closure_id
- ? DefineDeoptimizationLiteral(environment->closure())
- : Translation::kSelfLiteralId;
-
- switch (environment->frame_type()) {
- case JS_FUNCTION:
- translation->BeginJSFrame(environment->ast_id(), closure_id, height);
- break;
- case JS_CONSTRUCT:
- translation->BeginConstructStubFrame(closure_id, translation_size);
- break;
- case JS_GETTER:
- ASSERT(translation_size == 1);
- ASSERT(height == 0);
- translation->BeginGetterStubFrame(closure_id);
- break;
- case JS_SETTER:
- ASSERT(translation_size == 2);
- ASSERT(height == 0);
- translation->BeginSetterStubFrame(closure_id);
- break;
- case ARGUMENTS_ADAPTOR:
- translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
- break;
- case STUB:
- translation->BeginCompiledStubFrame();
- break;
- }
-
- // Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization;
- // otherwise, actual argument values are unknown for inlined frames.
- bool arguments_known = true;
- int arguments_index = *pushed_arguments_index;
- int arguments_count = *pushed_arguments_count;
- if (environment->entry() != NULL) {
- arguments_known = environment->entry()->arguments_pushed();
- arguments_index = arguments_index < 0
- ? GetStackSlotCount() : arguments_index + arguments_count;
- arguments_count = environment->entry()->arguments_count() + 1;
- if (environment->entry()->arguments_pushed()) {
- *pushed_arguments_index = arguments_index;
- *pushed_arguments_count = arguments_count;
- }
- }
-
- for (int i = 0; i < translation_size; ++i) {
- LOperand* value = environment->values()->at(i);
- // spilled_registers_ and spilled_double_registers_ are either
- // both NULL or both set.
- if (environment->spilled_registers() != NULL && value != NULL) {
- if (value->IsRegister() &&
- environment->spilled_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(translation,
- environment->spilled_registers()[value->index()],
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- } else if (
- value->IsDoubleRegister() &&
- environment->spilled_double_registers()[value->index()] != NULL) {
- translation->MarkDuplicate();
- AddToTranslation(
- translation,
- environment->spilled_double_registers()[value->index()],
- false,
- false,
- arguments_known,
- arguments_index,
- arguments_count);
- }
- }
-
- AddToTranslation(translation,
- value,
- environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i),
- arguments_known,
- arguments_index,
- arguments_count);
- }
-}
-
-
-void LCodeGen::AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count) {
- if (op == NULL) {
- // TODO(twuerthinger): Introduce marker operands to indicate that this value
- // is not present and must be reconstructed from the deoptimizer. Currently
- // this is only used for the arguments object.
- translation->StoreArgumentsObject(
- arguments_known, arguments_index, arguments_count);
- } else if (op->IsStackSlot()) {
- if (is_tagged) {
- translation->StoreStackSlot(op->index());
- } else if (is_uint32) {
- translation->StoreUint32StackSlot(op->index());
- } else {
- translation->StoreInt32StackSlot(op->index());
- }
- } else if (op->IsDoubleStackSlot()) {
- translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
- } else if (op->IsRegister()) {
- Register reg = ToRegister(op);
- if (is_tagged) {
- translation->StoreRegister(reg);
- } else if (is_uint32) {
- translation->StoreUint32Register(reg);
- } else {
- translation->StoreInt32Register(reg);
- }
- } else if (op->IsDoubleRegister()) {
- XMMRegister reg = ToDoubleRegister(op);
- translation->StoreDoubleRegister(reg);
- } else if (op->IsConstantOperand()) {
- HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
- int src_index = DefineDeoptimizationLiteral(constant->handle());
- translation->StoreLiteral(src_index);
- } else {
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- int argc) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
- ASSERT(instr != NULL);
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- __ call(code, mode);
- RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
-
- // Signal that we don't inline smi code before these stubs in the
- // optimizing code generator.
- if (code->kind() == Code::BINARY_OP_IC ||
- code->kind() == Code::COMPARE_IC) {
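- // PatchInlinedSmiCode inspects the byte following the call site:
- // this nop (kNopByte) tells it there is no inlined smi code to patch.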
- __ nop();
- }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr) {
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr) {
- ASSERT(instr != NULL);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- __ CallRuntime(function, num_arguments);
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
-}
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr) {
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(id);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode) {
- if (!environment->HasBeenRegistered()) {
- // Physical stack frame layout:
- // -x ............. -4 0 ..................................... y
- // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
- // Layout of the environment:
- // 0 ..................................................... size-1
- // [parameters] [locals] [expression stack including arguments]
-
- // Layout of the translation:
- // 0 ........................................................ size - 1 + 4
- // [expression stack including arguments] [locals] [4 words] [parameters]
- // |>------------ translation_size ------------<|
-
- int frame_count = 0;
- int jsframe_count = 0;
- int args_index = 0;
- int args_count = 0;
- for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
- ++frame_count;
- if (e->frame_type() == JS_FUNCTION) {
- ++jsframe_count;
- }
- }
- Translation translation(&translations_, frame_count, jsframe_count, zone());
- WriteTranslation(environment, &translation, &args_index, &args_count);
- int deoptimization_index = deoptimizations_.length();
- int pc_offset = masm()->pc_offset();
- environment->Register(deoptimization_index,
- translation.index(),
- (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
- deoptimizations_.Add(environment, environment->zone());
- }
-}
-
-
-void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(environment->HasBeenRegistered());
- int id = environment->deoptimization_index();
- ASSERT(info()->IsOptimizing() || info()->IsStub());
- Deoptimizer::BailoutType bailout_type = info()->IsStub()
- ? Deoptimizer::LAZY
- : Deoptimizer::EAGER;
- Address entry =
- Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
- if (entry == NULL) {
- Abort("bailout was not prepared");
- return;
- }
-
- ASSERT(FLAG_deopt_every_n_times == 0); // Not yet implemented on x64.
-
- if (FLAG_trap_on_deopt) {
- Label done;
- if (cc != no_condition) {
- __ j(NegateCondition(cc), &done, Label::kNear);
- }
- __ int3();
- __ bind(&done);
- }
-
- ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
- if (cc == no_condition && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- }
- } else {
- // We often have several deopts to the same entry; reuse the last
- // jump entry if this is the case.
- if (jump_table_.is_empty() ||
- jump_table_.last().address != entry ||
- jump_table_.last().needs_frame != !frame_is_built_ ||
- jump_table_.last().is_lazy_deopt != needs_lazy_deopt) {
- JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
- jump_table_.Add(table_entry, zone());
- }
- if (cc == no_condition) {
- __ jmp(&jump_table_.last().label);
- } else {
- __ j(cc, &jump_table_.last().label);
- }
- }
-}
-
-
-void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
- ZoneList<Handle<Map> > maps(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- if (map->CanTransition()) {
- maps.Add(map, zone());
- }
- }
- }
-#ifdef VERIFY_HEAP
- // This disables verification of weak embedded maps after full GC.
- // AddDependentCode can cause a GC, which would observe the state where
- // this code is not yet in the depended code lists of the embedded maps.
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
-#endif
- for (int i = 0; i < maps.length(); i++) {
- maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
- }
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- factory()->NewDeoptimizationInputData(length, TENURED);
-
- Handle<ByteArray> translations = translations_.CreateByteArray();
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
- int result = deoptimization_literals_.length();
- for (int i = 0; i < deoptimization_literals_.length(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
- }
- deoptimization_literals_.Add(literal, zone());
- return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- ASSERT(deoptimization_literals_.length() == 0);
-
- const ZoneList<Handle<JSFunction> >* inlined_closures =
- chunk()->inlined_closures();
-
- for (int i = 0, length = inlined_closures->length();
- i < length;
- i++) {
- DefineDeoptimizationLiteral(inlined_closures->at(i));
- }
-
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
- LInstruction* instr, SafepointMode safepoint_mode, int argc) {
- if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
- RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
- } else {
- ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
- RecordSafepointWithRegisters(
- instr->pointer_map(), argc, Safepoint::kLazyDeopt);
- }
-}
-
-
-void LCodeGen::RecordSafepoint(
- LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- ASSERT(kind == expected_safepoint_kind_);
-
- const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
-
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- kind, arguments, deopt_mode);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index(), zone());
- } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
- safepoint.DefinePointerRegister(ToRegister(pointer), zone());
- }
- }
- if (kind & Safepoint::kWithRegisters) {
- // Register rsi always contains a pointer to the context.
- safepoint.DefinePointerRegister(rsi, zone());
- }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
- LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
- RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode deopt_mode) {
- RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-void LCodeGen::RecordPosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
- if (label->is_loop_header()) {
- Comment(";;; B%d - LOOP entry", label->block_id());
- } else {
- Comment(";;; B%d", label->block_id());
- }
- __ bind(label->label());
- current_block_ = label->block_id();
- DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
- resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
- for (int i = LGap::FIRST_INNER_POSITION;
- i <= LGap::LAST_INNER_POSITION;
- i++) {
- LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
- LParallelMove* move = gap->GetParallelMove(inner_pos);
- if (move != NULL) DoParallelMove(move);
- }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
- DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpConstructResult: {
- RegExpConstructResultStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::RegExpExec: {
- RegExpExecStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringAdd: {
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::StringCompare: {
- StringCompareStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type(),
- TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
- // Nothing to do.
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
-
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
-
- if (divisor < 0) divisor = -divisor;
-
- Label positive_dividend, done;
- __ testl(dividend, dividend);
- __ j(not_sign, &positive_dividend, Label::kNear);
- __ negl(dividend);
- __ andl(dividend, Immediate(divisor - 1));
- __ negl(dividend);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ j(not_zero, &done, Label::kNear);
- DeoptimizeIf(no_condition, instr->environment());
- } else {
- __ jmp(&done, Label::kNear);
- }
- __ bind(&positive_dividend);
- __ andl(dividend, Immediate(divisor - 1));
- __ bind(&done);
- } else {
- Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
- Register left_reg = ToRegister(instr->left());
- Register right_reg = ToRegister(instr->right());
- Register result_reg = ToRegister(instr->result());
-
- ASSERT(left_reg.is(rax));
- ASSERT(result_reg.is(rdx));
- ASSERT(!right_reg.is(rax));
- ASSERT(!right_reg.is(rdx));
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
-
- __ testl(left_reg, left_reg);
- __ j(zero, &remainder_eq_dividend, Label::kNear);
- __ j(sign, &slow, Label::kNear);
-
- __ testl(right_reg, right_reg);
- __ j(not_sign, &both_positive, Label::kNear);
- // The sign of the divisor doesn't matter.
- __ neg(right_reg);
-
- __ bind(&both_positive);
- // If the dividend is smaller than the nonnegative
- // divisor, the dividend is the result.
- __ cmpl(left_reg, right_reg);
- __ j(less, &remainder_eq_dividend, Label::kNear);
-
- // Check if the divisor is a PowerOfTwo integer.
- Register scratch = ToRegister(instr->temp());
- __ movl(scratch, right_reg);
- __ subl(scratch, Immediate(1));
- __ testl(scratch, right_reg);
- __ j(not_zero, &do_subtraction, Label::kNear);
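- // Power-of-two divisor: the remainder is dividend & (divisor - 1),
- // and scratch already holds divisor - 1.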
- __ andl(left_reg, scratch);
- __ jmp(&remainder_eq_dividend, Label::kNear);
-
- __ bind(&do_subtraction);
- const int kUnfolds = 3;
- // Try a few subtractions of the dividend.
- __ movl(scratch, left_reg);
- for (int i = 0; i < kUnfolds; i++) {
- // Reduce the dividend by the divisor.
- __ subl(left_reg, right_reg);
- // Check if the dividend is less than the divisor.
- __ cmpl(left_reg, right_reg);
- __ j(less, &remainder_eq_dividend, Label::kNear);
- }
- __ movl(left_reg, scratch);
-
- // Slow case, using idiv instruction.
- __ bind(&slow);
-
- // Check for (kMinInt % -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
- DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // Sign extend eax to edx.
- // (We are using only the low 32 bits of the values.)
- __ cdq();
-
- // Check for (0 % -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- Label done;
- __ testl(left_reg, left_reg);
- __ j(not_sign, &positive_left, Label::kNear);
- __ idivl(right_reg);
-
- // Test the remainder for 0, because then the result would be -0.
- __ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
-
- DeoptimizeIf(no_condition, instr->environment());
- __ bind(&positive_left);
- __ idivl(right_reg);
- __ bind(&done);
- } else {
- __ idivl(right_reg);
- }
- __ jmp(&done, Label::kNear);
-
- __ bind(&remainder_eq_dividend);
- __ movl(result_reg, left_reg);
-
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->right()->IsConstantOperand());
-
- const Register dividend = ToRegister(instr->left());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- const Register result = ToRegister(instr->result());
-
- switch (divisor) {
- case 0:
- DeoptimizeIf(no_condition, instr->environment());
- return;
-
- case 1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- return;
-
- case -1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ negl(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
- return;
- }
-
- uint32_t divisor_abs = abs(divisor);
- if (IsPowerOf2(divisor_abs)) {
- int32_t power = WhichPowerOf2(divisor_abs);
- if (divisor < 0) {
- __ movsxlq(result, dividend);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ sar(result, Immediate(power));
- } else {
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ sarl(result, Immediate(power));
- }
- } else {
- Register reg1 = ToRegister(instr->temp());
- Register reg2 = ToRegister(instr->result());
-
- // Find b such that 2^b < divisor_abs < 2^(b+1).
- unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
- unsigned shift = 32 + b; // Precision +1bit (effectively).
- double multiplier_f =
- static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
- int64_t multiplier;
- if (multiplier_f - floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(floor(multiplier_f));
- } else {
- multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
- }
- // The multiplier is a uint32.
- ASSERT(multiplier > 0 &&
- multiplier < (static_cast<int64_t>(1) << 32));
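- // Illustration only: for divisor_abs == 3 this gives b == 1, shift == 33
- // and multiplier == round(2^33 / 3) == 2863311531, so e.g. floor(7 / 3)
- // is computed below as (7 * 2863311531 + 2^30) >> 33 == 2.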
- // The multiply is int64, so sign-extend to r64.
- __ movsxlq(reg1, dividend);
- if (divisor < 0 &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ neg(reg1);
- DeoptimizeIf(zero, instr->environment());
- }
- __ movq(reg2, multiplier, RelocInfo::NONE64);
- // The result just fits in r64, because it's int32 * uint32.
- __ imul(reg2, reg1);
-
- __ addq(reg2, Immediate(1 << 30));
- __ sar(reg2, Immediate(shift));
- }
-}
-
-
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = -divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
-
- if (test_value != 0) {
- // Deoptimize if remainder is not 0.
- __ testl(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sarl(dividend, Immediate(power));
- }
-
- if (divisor < 0) __ negl(dividend);
-
- return;
- }
-
- LOperand* right = instr->right();
- ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(ToRegister(instr->left()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rdx));
-
- Register left_reg = rax;
-
- // Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
-
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ testl(left_reg, left_reg);
- __ j(not_zero, &left_not_zero, Label::kNear);
- __ testl(right_reg, right_reg);
- DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
- }
-
- // Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
- DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
- }
-
- // Sign extend to rdx.
- __ cdq();
- __ idivl(right_reg);
-
- if (!instr->is_flooring()) {
- // Deoptimize if remainder is not 0.
- __ testl(rdx, rdx);
- DeoptimizeIf(not_zero, instr->environment());
- } else {
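- // Flooring division: if the remainder is non-zero and its sign differs
- // from the divisor's, the truncated quotient is one too large, so the
- // xor/sar sequence puts -1 in rdx and the addl corrects rax.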
- Label done;
- __ testl(rdx, rdx);
- __ j(zero, &done, Label::kNear);
- __ xorl(rdx, right_reg);
- __ sarl(rdx, Immediate(31));
- __ addl(rax, rdx);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->left());
- LOperand* right = instr->right();
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ movl(kScratchRegister, left);
- }
-
- bool can_overflow =
- instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- if (right->IsConstantOperand()) {
- int right_value = ToInteger32(LConstantOperand::cast(right));
- if (right_value == -1) {
- __ negl(left);
- } else if (right_value == 0) {
- __ xorl(left, left);
- } else if (right_value == 2) {
- __ addl(left, left);
- } else if (!can_overflow) {
- // If the multiplication is known not to overflow, we can
- // use operations (such as lea and shl) that don't set the
- // overflow flag correctly.
- switch (right_value) {
- case 1:
- // Do nothing.
- break;
- case 3:
- __ leal(left, Operand(left, left, times_2, 0));
- break;
- case 4:
- __ shll(left, Immediate(2));
- break;
- case 5:
- __ leal(left, Operand(left, left, times_4, 0));
- break;
- case 8:
- __ shll(left, Immediate(3));
- break;
- case 9:
- __ leal(left, Operand(left, left, times_8, 0));
- break;
- case 16:
- __ shll(left, Immediate(4));
- break;
- default:
- __ imull(left, left, Immediate(right_value));
- break;
- }
- } else {
- __ imull(left, left, Immediate(right_value));
- }
- } else if (right->IsStackSlot()) {
- __ imull(left, ToOperand(right));
- } else {
- __ imull(left, ToRegister(right));
- }
-
- if (can_overflow) {
- DeoptimizeIf(overflow, instr->environment());
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ testl(left, left);
- __ j(not_zero, &done, Label::kNear);
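- // The product is zero, so it should be -0 exactly when one of the
- // operands is negative; kScratchRegister still holds the original
- // left operand saved above.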
- if (right->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
- } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
- __ cmpl(kScratchRegister, Immediate(0));
- DeoptimizeIf(less, instr->environment());
- }
- } else if (right->IsStackSlot()) {
- __ orl(kScratchRegister, ToOperand(right));
- DeoptimizeIf(sign, instr->environment());
- } else {
- // Test the non-zero operand for negative sign.
- __ orl(kScratchRegister, ToRegister(right));
- DeoptimizeIf(sign, instr->environment());
- }
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
-
- if (right->IsConstantOperand()) {
- int right_operand = ToInteger32(LConstantOperand::cast(right));
- switch (instr->op()) {
- case Token::BIT_AND:
- __ andl(ToRegister(left), Immediate(right_operand));
- break;
- case Token::BIT_OR:
- __ orl(ToRegister(left), Immediate(right_operand));
- break;
- case Token::BIT_XOR:
- __ xorl(ToRegister(left), Immediate(right_operand));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else if (right->IsStackSlot()) {
- switch (instr->op()) {
- case Token::BIT_AND:
- __ andl(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_OR:
- __ orl(ToRegister(left), ToOperand(right));
- break;
- case Token::BIT_XOR:
- __ xorl(ToRegister(left), ToOperand(right));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- ASSERT(right->IsRegister());
- switch (instr->op()) {
- case Token::BIT_AND:
- __ andl(ToRegister(left), ToRegister(right));
- break;
- case Token::BIT_OR:
- __ orl(ToRegister(left), ToRegister(right));
- break;
- case Token::BIT_XOR:
- __ xorl(ToRegister(left), ToRegister(right));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- if (right->IsRegister()) {
- ASSERT(ToRegister(right).is(rcx));
-
- switch (instr->op()) {
- case Token::ROR:
- __ rorl_cl(ToRegister(left));
- break;
- case Token::SAR:
- __ sarl_cl(ToRegister(left));
- break;
- case Token::SHR:
- __ shrl_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr->environment());
- }
- break;
- case Token::SHL:
- __ shll_cl(ToRegister(left));
- break;
- default:
- UNREACHABLE();
- break;
- }
- } else {
- int value = ToInteger32(LConstantOperand::cast(right));
- uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
- switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ rorl(ToRegister(left), Immediate(shift_count));
- }
- break;
- case Token::SAR:
- if (shift_count != 0) {
- __ sarl(ToRegister(left), Immediate(shift_count));
- }
- break;
- case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
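- // With a zero shift count the value is unchanged, so a negative
- // input cannot be represented as the expected unsigned result;
- // deoptimize when the sign bit is set.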
- __ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr->environment());
- } else {
- __ shrl(ToRegister(left), Immediate(shift_count));
- }
- break;
- case Token::SHL:
- if (shift_count != 0) {
- __ shll(ToRegister(left), Immediate(shift_count));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ subl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
- } else if (right->IsRegister()) {
- __ subl(ToRegister(left), ToRegister(right));
- } else {
- __ subl(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
- ASSERT(instr->result()->IsRegister());
- __ Set(ToRegister(instr->result()), instr->value());
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- XMMRegister res = ToDoubleRegister(instr->result());
- double v = instr->value();
- uint64_t int_val = BitCast<uint64_t, double>(v);
- // Use xor to produce +0.0 in a fast and compact way, but avoid doing
- // so if the constant is -0.0.
- if (int_val == 0) {
- __ xorps(res, res);
- } else {
- Register tmp = ToRegister(instr->temp());
- __ Set(tmp, int_val);
- __ movq(res, tmp);
- }
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
- Handle<Object> value = instr->value();
- if (value->IsSmi()) {
- __ Move(ToRegister(instr->result()), value);
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
- Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->value());
- __ movq(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->value());
-
- // Load map into |result|.
- __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
- // Load the map's "bit field 2" into |result|. We only need the first byte.
- __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(result, Immediate(Map::kElementsKindMask));
- __ shr(result, Immediate(Map::kElementsKindShift));
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Label done;
- // If the object is a smi, return the object.
- __ JumpIfSmi(input, &done, Label::kNear);
-
- // If the object is not a value type, return the object.
- __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
- __ j(not_equal, &done, Label::kNear);
- __ movq(result, FieldOperand(input, JSValue::kValueOffset));
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Smi* index = instr->index();
- Label runtime, done, not_date_object;
- ASSERT(object.is(result));
- ASSERT(object.is(rax));
-
- Condition cc = masm()->CheckSmi(object);
- DeoptimizeIf(cc, instr->environment());
- __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
- DeoptimizeIf(not_equal, instr->environment());
-
- if (index->value() == 0) {
- __ movq(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ movq(kScratchRegister, stamp);
- __ cmpq(kScratchRegister, FieldOperand(object,
- JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ movq(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2);
-#ifdef _WIN64
- __ movq(rcx, object);
- __ movq(rdx, index, RelocInfo::NONE64);
-#else
- __ movq(rdi, object);
- __ movq(rsi, index, RelocInfo::NONE64);
-#endif
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
-}
-
-
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->Equals(instr->result()));
- __ not_(ToRegister(input));
-}
-
-
-void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToRegister(instr->value()));
- CallRuntime(Runtime::kThrow, 1, instr);
-
- if (FLAG_debug_code) {
- Comment("Unreachable code.");
- __ int3();
- }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
-
- if (right->IsConstantOperand()) {
- __ addl(ToRegister(left),
- Immediate(ToInteger32(LConstantOperand::cast(right))));
- } else if (right->IsRegister()) {
- __ addl(ToRegister(left), ToRegister(right));
- } else {
- __ addl(ToRegister(left), ToOperand(right));
- }
-
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
-void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- ASSERT(left->Equals(instr->result()));
- HMathMinMax::Operation operation = instr->hydrogen()->operation();
- if (instr->hydrogen()->representation().IsInteger32()) {
- Label return_left;
- Condition condition = (operation == HMathMinMax::kMathMin)
- ? less_equal
- : greater_equal;
- Register left_reg = ToRegister(left);
- if (right->IsConstantOperand()) {
- Immediate right_imm =
- Immediate(ToInteger32(LConstantOperand::cast(right)));
- __ cmpl(left_reg, right_imm);
- __ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_imm);
- } else if (right->IsRegister()) {
- Register right_reg = ToRegister(right);
- __ cmpl(left_reg, right_reg);
- __ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_reg);
- } else {
- Operand right_op = ToOperand(right);
- __ cmpl(left_reg, right_op);
- __ j(condition, &return_left, Label::kNear);
- __ movq(left_reg, right_op);
- }
- __ bind(&return_left);
- } else {
- ASSERT(instr->hydrogen()->representation().IsDouble());
- Label check_nan_left, check_zero, return_left, return_right;
- Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
- XMMRegister left_reg = ToDoubleRegister(left);
- XMMRegister right_reg = ToDoubleRegister(right);
- __ ucomisd(left_reg, right_reg);
- __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
-
- __ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(left_reg, xmm_scratch);
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
- __ orpd(left_reg, right_reg);
- } else {
- // Since we operate on +0 and/or -0, addsd and andpd have the same effect.
- __ addsd(left_reg, right_reg);
- }
- __ jmp(&return_left, Label::kNear);
-
- __ bind(&check_nan_left);
- __ ucomisd(left_reg, left_reg); // NaN check.
- __ j(parity_even, &return_left, Label::kNear);
- __ bind(&return_right);
- __ movsd(left_reg, right_reg);
-
- __ bind(&return_left);
- }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- XMMRegister left = ToDoubleRegister(instr->left());
- XMMRegister right = ToDoubleRegister(instr->right());
- XMMRegister result = ToDoubleRegister(instr->result());
- // All operations except MOD are computed in-place.
- ASSERT(instr->op() == Token::MOD || left.is(result));
- switch (instr->op()) {
- case Token::ADD:
- __ addsd(left, right);
- break;
- case Token::SUB:
- __ subsd(left, right);
- break;
- case Token::MUL:
- __ mulsd(left, right);
- break;
- case Token::DIV:
- __ divsd(left, right);
- __ movaps(left, left);
- break;
- case Token::MOD:
- __ PrepareCallCFunction(2);
- __ movaps(xmm0, left);
- ASSERT(right.is(xmm1));
- __ CallCFunction(
- ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movaps(result, xmm0);
- break;
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->left()).is(rdx));
- ASSERT(ToRegister(instr->right()).is(rax));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ nop(); // Signals no inlined code.
-}
-
-
-int LCodeGen::GetNextEmittedBlock(int block) {
- for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
- LLabel* label = chunk_->GetLabel(i);
- if (!label->HasReplacement()) return i;
- }
- return -1;
-}
-
-
-void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- int next_block = GetNextEmittedBlock(current_block_);
- right_block = chunk_->LookupDestination(right_block);
- left_block = chunk_->LookupDestination(left_block);
-
- if (right_block == left_block) {
- EmitGoto(left_block);
- } else if (left_block == next_block) {
- __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
- } else if (right_block == next_block) {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- } else {
- __ j(cc, chunk_->GetAssemblyLabel(left_block));
- if (cc != always) {
- __ jmp(chunk_->GetAssemblyLabel(right_block));
- }
- }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsInteger32()) {
- Register reg = ToRegister(instr->value());
- __ testl(reg, reg);
- EmitBranch(true_block, false_block, not_zero);
- } else if (r.IsDouble()) {
- XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
- EmitBranch(true_block, false_block, not_equal);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsBoolean()) {
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- EmitBranch(true_block, false_block, equal);
- } else if (type.IsSmi()) {
- __ SmiCompare(reg, Smi::FromInt(0));
- EmitBranch(true_block, false_block, not_equal);
- } else {
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
- // Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
-
- if (expected.Contains(ToBooleanStub::UNDEFINED)) {
- // undefined -> false.
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, false_label);
- }
- if (expected.Contains(ToBooleanStub::BOOLEAN)) {
- // true -> true.
- __ CompareRoot(reg, Heap::kTrueValueRootIndex);
- __ j(equal, true_label);
- // false -> false.
- __ CompareRoot(reg, Heap::kFalseValueRootIndex);
- __ j(equal, false_label);
- }
- if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
- // 'null' -> false.
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- __ j(equal, false_label);
- }
-
- if (expected.Contains(ToBooleanStub::SMI)) {
- // Smis: 0 -> false, all other -> true.
- __ Cmp(reg, Smi::FromInt(0));
- __ j(equal, false_label);
- __ JumpIfSmi(reg, true_label);
- } else if (expected.NeedsMap()) {
- // If we need a map later and have a Smi -> deopt.
- __ testb(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr->environment());
- }
-
- const Register map = kScratchRegister;
- if (expected.NeedsMap()) {
- __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));
-
- if (expected.CanBeUndetectable()) {
- // Undetectable -> false.
- __ testb(FieldOperand(map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, false_label);
- }
- }
-
- if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
- // spec object -> true.
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- __ j(above_equal, true_label);
- }
-
- if (expected.Contains(ToBooleanStub::STRING)) {
- // String value -> false iff empty.
- Label not_string;
- __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string, Label::kNear);
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
- __ j(not_zero, true_label);
- __ jmp(false_label);
- __ bind(&not_string);
- }
-
- if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
- // heap number -> false iff +0, -0, or NaN.
- Label not_heap_number;
- __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
- __ j(zero, false_label);
- __ jmp(true_label);
- __ bind(&not_heap_number);
- }
-
- // We've seen something for the first time -> deopt.
- DeoptimizeIf(no_condition, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
- block = chunk_->LookupDestination(block);
- int next_block = GetNextEmittedBlock(current_block_);
- if (block != next_block) {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
- EmitGoto(instr->block_id());
-}
-
-
-inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = no_condition;
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT:
- cond = equal;
- break;
- case Token::LT:
- cond = is_unsigned ? below : less;
- break;
- case Token::GT:
- cond = is_unsigned ? above : greater;
- break;
- case Token::LTE:
- cond = is_unsigned ? below_equal : less_equal;
- break;
- case Token::GTE:
- cond = is_unsigned ? above_equal : greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
- return cond;
-}
-
-
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
-
- if (left->IsConstantOperand() && right->IsConstantOperand()) {
- // We can statically evaluate the comparison.
- double left_val = ToDouble(LConstantOperand::cast(left));
- double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block =
- EvalComparison(instr->op(), left_val, right_val) ? true_block
- : false_block;
- EmitGoto(next_block);
- } else {
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
- } else {
- int32_t value;
- if (right->IsConstantOperand()) {
- value = ToInteger32(LConstantOperand::cast(right));
- __ cmpl(ToRegister(left), Immediate(value));
- } else if (left->IsConstantOperand()) {
- value = ToInteger32(LConstantOperand::cast(left));
- if (right->IsRegister()) {
- __ cmpl(ToRegister(right), Immediate(value));
- } else {
- __ cmpl(ToOperand(right), Immediate(value));
- }
- // We transposed the operands. Reverse the condition.
- cc = ReverseCondition(cc);
- } else {
- if (right->IsRegister()) {
- __ cmpl(ToRegister(left), ToRegister(right));
- } else {
- __ cmpl(ToRegister(left), ToOperand(right));
- }
- }
- }
- EmitBranch(true_block, false_block, cc);
- }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
-
- __ cmpq(left, right);
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ cmpq(left, Immediate(instr->hydrogen()->right()));
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- // If the expression is known to be untagged or a smi, then it's definitely
- // not null, and it can't be an undetectable object.
- if (instr->hydrogen()->representation().IsSpecialization() ||
- instr->hydrogen()->type().IsSmi()) {
- EmitGoto(false_block);
- return;
- }
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ CompareRoot(reg, nil_value);
- if (instr->kind() == kStrictEquality) {
- EmitBranch(true_block, false_block, equal);
- } else {
- Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
- Heap::kUndefinedValueRootIndex :
- Heap::kNullValueRootIndex;
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ j(equal, true_label);
- __ CompareRoot(reg, other_nil_value);
- __ j(equal, true_label);
- __ JumpIfSmi(reg, false_label);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->temp());
- __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
- }
-}
-
-
-Condition LCodeGen::EmitIsObject(Register input,
- Label* is_not_object,
- Label* is_object) {
- ASSERT(!input.is(kScratchRegister));
-
- __ JumpIfSmi(input, is_not_object);
-
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ j(equal, is_object);
-
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, is_not_object);
-
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(below, is_not_object);
- __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- return below_equal;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond = EmitIsObject(reg, false_label, true_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
- Register temp1,
- Label* is_not_string) {
- __ JumpIfSmi(input, is_not_string);
- Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
-
- return cond;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition true_cond = EmitIsString(reg, temp, false_label);
-
- EmitBranch(true_block, false_block, true_cond);
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Condition is_smi;
- if (instr->value()->IsRegister()) {
- Register input = ToRegister(instr->value());
- is_smi = masm()->CheckSmi(input);
- } else {
- Operand input = ToOperand(instr->value());
- is_smi = masm()->CheckSmi(input);
- }
- EmitBranch(true_block, false_block, is_smi);
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ testb(FieldOperand(temp, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- EmitBranch(true_block, false_block, not_zero);
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = TokenToCondition(op, false);
- __ testq(rax, rax);
-
- EmitBranch(true_block, false_block, condition);
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == FIRST_TYPE) return to;
- ASSERT(from == to || to == LAST_TYPE);
- return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
- InstanceType from = instr->from();
- InstanceType to = instr->to();
- if (from == to) return equal;
- if (to == LAST_TYPE) return above_equal;
- if (from == FIRST_TYPE) return below_equal;
- UNREACHABLE();
- return equal;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- __ JumpIfSmi(input, false_label);
-
- __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
- EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ movl(result, FieldOperand(input, String::kHashFieldOffset));
- ASSERT(String::kHashShift >= kSmiTagSize);
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- __ testl(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, equal);
-}
-
-
-// Branches to a label or falls through with the answer in the z flag.
-// Trashes the temp register.
-void LCodeGen::EmitClassOfTest(Label* is_true,
- Label* is_false,
- Handle<String> class_name,
- Register input,
- Register temp,
- Register temp2) {
- ASSERT(!input.is(temp));
- ASSERT(!input.is(temp2));
- ASSERT(!temp.is(temp2));
-
- __ JumpIfSmi(input, is_false);
-
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- __ j(equal, is_true);
- __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
- __ j(equal, is_true);
- } else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(above, is_false);
- }
-
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
- // Check if the constructor in the map is a function.
- __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
-
- // Objects with a non-function constructor have class 'Object'.
- __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
- if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
- __ j(not_equal, is_true);
- } else {
- __ j(not_equal, is_false);
- }
-
- // temp now contains the constructor function. Grab the
- // instance class name from there.
- __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ movq(temp, FieldOperand(temp,
- SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is internalized since it's a literal.
- // The name in the constructor is internalized because of the way the context
- // is booted. This routine isn't expected to work for random API-created
- // classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are internalized it is sufficient to use an
- // identity comparison.
- ASSERT(class_name->IsInternalizedString());
- __ Cmp(temp, class_name);
- // End with the answer in the z flag.
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- int true_block = instr->true_block_id();
- int false_block = instr->false_block_id();
-
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- Label true_value, done;
- __ testq(rax, rax);
- __ j(zero, &true_value, Label::kNear);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
- }
- virtual LInstruction* instr() { return instr_; }
- Label* map_check() { return &map_check_; }
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
-
- // A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurrences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- // Use a temp register to avoid memory operands with variable lengths.
- Register map = ToRegister(instr->temp());
- __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- Handle<JSGlobalPropertyCell> cache_cell =
- factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
- __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ cmpq(map, Operand(kScratchRegister, 0));
- __ j(not_equal, &cache_miss, Label::kNear);
- // Patched to load either true or false.
- __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
-#ifdef DEBUG
- // Check that the code size between patch label and patch sites is invariant.
- Label end_of_patched_code;
- __ bind(&end_of_patched_code);
- ASSERT(true);
-#endif
- __ jmp(&done);
-
- // The inlined call site cache did not match. Check for null and string
- // before calling the deferred code.
- __ bind(&cache_miss); // Null is not an instance of anything.
- __ CompareRoot(object, Heap::kNullValueRootIndex);
- __ j(equal, &false_result, Label::kNear);
-
- // String values are not instances of anything.
- __ JumpIfNotString(object, kScratchRegister, deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
-
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- {
- PushSafepointRegistersScope scope(this);
- InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
- InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
- InstanceofStub stub(flags);
-
- __ push(ToRegister(instr->value()));
- __ PushHeapObject(instr->function());
-
- static const int kAdditionalDelta = 10;
- int delta =
- masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
- ASSERT(delta >= 0);
- __ push_imm32(delta);
-
- // We are pushing three values on the stack but recording a
- // safepoint with two arguments because the stub is going to
- // remove the third argument from the stack before jumping
- // to the instanceof builtin on the slow path.
- CallCodeGeneric(stub.GetCode(isolate()),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS,
- 2);
- ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Move result to a register that survives the end of the
- // PushSafepointRegistersScope.
- __ movq(kScratchRegister, rax);
- }
- __ testq(kScratchRegister, kScratchRegister);
- Label load_false;
- Label done;
- __ j(not_zero, &load_false);
- __ LoadRoot(rax, Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&load_false);
- __ LoadRoot(rax, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(result, FieldOperand(result, Map::kInstanceSizeOffset));
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- Condition condition = TokenToCondition(op, false);
- Label true_value, done;
- __ testq(rax, rax);
- __ j(condition, &true_value, Label::kNear);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace && info()->IsOptimizing()) {
- // Preserve the return value on the stack and rely on the runtime
- // call to return the value in the same register.
- __ push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- if (info()->saves_caller_doubles()) {
- ASSERT(NeedsEagerFrame());
- BitVector* doubles = chunk()->allocated_double_registers();
- BitVector::Iterator save_iterator(doubles);
- int count = 0;
- while (!save_iterator.Done()) {
- __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
- MemOperand(rsp, count * kDoubleSize));
- save_iterator.Advance();
- count++;
- }
- }
- if (NeedsEagerFrame()) {
- __ movq(rsp, rbp);
- __ pop(rbp);
- }
- if (info()->IsStub()) {
- __ Ret(0, r10);
- } else {
- __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
- }
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ LoadGlobalCell(result, instr->hydrogen()->cell());
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
- }
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(rax));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- __ Move(rcx, instr->name());
- RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
- RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // We have a temp because CompareRoot might clobber kScratchRegister.
- Register cell = ToRegister(instr->temp());
- ASSERT(!value.is(cell));
- __ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
- // Store the value.
- __ movq(Operand(cell, 0), value);
- } else {
- // Store the value.
- __ movq(kScratchRegister, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ movq(Operand(kScratchRegister, 0), value);
- }
- // Cells are always rescanned, so no write barrier here.
-}
-
-
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
- ASSERT(ToRegister(instr->global_object()).is(rdx));
- ASSERT(ToRegister(instr->value()).is(rax));
-
- __ Move(rcx, instr->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ movq(result, ContextOperand(context, instr->slot_index()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- Label is_not_hole;
- __ j(not_equal, &is_not_hole, Label::kNear);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ bind(&is_not_hole);
- }
- }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
- Register context = ToRegister(instr->context());
- Register value = ToRegister(instr->value());
-
- Operand target = ContextOperand(context, instr->slot_index());
-
- Label skip_assignment;
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
- if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ j(not_equal, &skip_assignment);
- }
- }
- __ movq(target, value);
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- int offset = Context::SlotOffset(instr->slot_index());
- Register scratch = ToRegister(instr->temp());
- __ RecordWriteContextSlot(context,
- offset,
- value,
- scratch,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-
- __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
- if (instr->hydrogen()->is_in_object()) {
- __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
- } else {
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
- }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env) {
- LookupResult lookup(isolate());
- type->LookupDescriptor(NULL, *name, &lookup);
- ASSERT(lookup.IsFound() || lookup.IsCacheable());
- if (lookup.IsField()) {
- int index = lookup.GetLocalFieldIndexFromMap(*type);
- int offset = index * kPointerSize;
- if (index < 0) {
- // Negative property indices are in-object properties, indexed
- // from the end of the fixed part of the object.
- __ movq(result, FieldOperand(object, offset + type->instance_size()));
- } else {
- // Non-negative property indices are in the properties array.
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
- }
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
- } else {
- // Negative lookup.
- // Check prototypes.
- Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
- Heap* heap = type->GetHeap();
- while (*current != heap->null_value()) {
- __ LoadHeapObject(result, current);
- __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
- Handle<Map>(current->map()));
- DeoptimizeIf(not_equal, env);
- current =
- Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
- }
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- }
-}
-
-
-// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
-// prototype chain, which causes unbounded code generation.
-static bool CompactEmit(SmallMapList* list,
- Handle<String> name,
- int i,
- Isolate* isolate) {
- Handle<Map> map = list->at(i);
- // If the map has ElementsKind transitions, we will generate map checks
- // for each kind in __ CompareMap(..., ALLOW_ELEMENT_TRANSITION_MAPS).
- if (map->HasElementsTransition()) return false;
- LookupResult lookup(isolate);
- map->LookupDescriptor(NULL, *name, &lookup);
- return lookup.IsField() || lookup.IsConstantFunction();
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
- Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
-
- int map_count = instr->hydrogen()->types()->length();
- bool need_generic = instr->hydrogen()->need_generic();
-
- if (map_count == 0 && !need_generic) {
- DeoptimizeIf(no_condition, instr->environment());
- return;
- }
- Handle<String> name = instr->hydrogen()->name();
- Label done;
- bool all_are_compact = true;
- for (int i = 0; i < map_count; ++i) {
- if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
- all_are_compact = false;
- break;
- }
- }
- for (int i = 0; i < map_count; ++i) {
- bool last = (i == map_count - 1);
- Handle<Map> map = instr->hydrogen()->types()->at(i);
- Label check_passed;
- __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
- if (last && !need_generic) {
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- } else {
- Label next;
- bool compact = all_are_compact ? true :
- CompactEmit(instr->hydrogen()->types(), name, i, isolate());
- __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
- __ bind(&check_passed);
- EmitLoadFieldOrConstantFunction(
- result, object, map, name, instr->environment());
- __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
- __ bind(&next);
- }
- }
- if (need_generic) {
- __ Move(rcx, name);
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(rax));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- __ Move(rcx, instr->name());
- Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
- Register function = ToRegister(instr->function());
- Register result = ToRegister(instr->result());
-
- // Check that the function really is a function.
- __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
- DeoptimizeIf(not_equal, instr->environment());
-
- // Check whether the function has an instance prototype.
- Label non_instance;
- __ testb(FieldOperand(result, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- __ j(not_zero, &non_instance, Label::kNear);
-
- // Get the prototype or initial map from the function.
- __ movq(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // Check that the function has a prototype or an initial map.
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
- __ j(not_equal, &done, Label::kNear);
-
- // Get the prototype from the initial map.
- __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
- __ jmp(&done, Label::kNear);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in the function's map.
- __ bind(&non_instance);
- __ movq(result, FieldOperand(result, Map::kConstructorOffset));
-
- // All done.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
- if (FLAG_debug_code) {
- Label done, ok, fail;
- __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(equal, &done, Label::kNear);
- __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &done, Label::kNear);
- Register temp((result.is(rax)) ? rbx : rax);
- __ push(temp);
- __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
- __ and_(temp, Immediate(Map::kElementsKindMask));
- __ shr(temp, Immediate(Map::kElementsKindShift));
- __ cmpl(temp, Immediate(GetInitialFastElementsKind()));
- __ j(less, &fail, Label::kNear);
- __ cmpl(temp, Immediate(TERMINAL_FAST_ELEMENTS_KIND));
- __ j(less_equal, &ok, Label::kNear);
- __ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ j(less, &fail, Label::kNear);
- __ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
- __ j(less_equal, &ok, Label::kNear);
- __ bind(&fail);
- __ Abort("Check for fast or external elements failed");
- __ bind(&ok);
- __ pop(temp);
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoLoadExternalArrayPointer(
- LLoadExternalArrayPointer* instr) {
- Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->object());
- __ movq(result, FieldOperand(input,
- ExternalPixelArray::kExternalPointerOffset));
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Register arguments = ToRegister(instr->arguments());
- Register length = ToRegister(instr->length());
- Register result = ToRegister(instr->result());
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them; add one more.
- if (instr->index()->IsRegister()) {
- __ subl(length, ToRegister(instr->index()));
- } else {
- __ subl(length, ToOperand(instr->index()));
- }
- __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed (in this case) instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index argument
- // to the bounds check, which can be tagged, so that case must be
- // handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- elements_kind,
- 0,
- instr->additional_index()));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(ToDoubleRegister(instr->result()), operand);
- } else {
- Register result(ToRegister(instr->result()));
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsxbq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- __ movzxbq(result, operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsxwq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzxwq(result, operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ movsxlq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(result, operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ testl(result, result);
- DeoptimizeIf(negative, instr->environment());
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
- Operand hole_check_operand = BuildFastArrayOperand(
- instr->elements(),
- key,
- FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
- __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr->environment());
- }
-
- Operand double_load_operand = BuildFastArrayOperand(
- instr->elements(),
- key,
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ movsd(result, double_load_operand);
-}
-
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register result = ToRegister(instr->result());
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that
- // case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
- // Load the result.
- __ movq(result,
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- Condition smi = __ CheckSmi(result);
- DeoptimizeIf(NegateCondition(smi), instr->environment());
- } else {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
-Operand LCodeGen::BuildFastArrayOperand(
- LOperand* elements_pointer,
- LOperand* key,
- ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
- Register elements_pointer_reg = ToRegister(elements_pointer);
- int shift_size = ElementsKindToShiftSize(elements_kind);
- if (key->IsConstantOperand()) {
- int constant_value = ToInteger32(LConstantOperand::cast(key));
- if (constant_value & 0xF0000000) {
- Abort("array index constant value too big");
- }
- return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
- } else {
- ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(elements_pointer_reg,
- ToRegister(key),
- scale_factor,
- offset + (additional_index << shift_size));
- }
-}
-
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(rdx));
- ASSERT(ToRegister(instr->key()).is(rax));
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Register result = ToRegister(instr->result());
-
- if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(rsp, -2 * kPointerSize));
- } else {
- // Check for arguments adaptor frame.
- Label done, adapted;
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adapted, Label::kNear);
-
- // No arguments adaptor frame.
- __ movq(result, rbp);
- __ jmp(&done, Label::kNear);
-
- // Arguments adaptor frame present.
- __ bind(&adapted);
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Result is the frame pointer for the frame if not adapted and for the real
- // frame below the adaptor frame if adapted.
- __ bind(&done);
- }
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register result = ToRegister(instr->result());
-
- Label done;
-
- // If there is no arguments adaptor frame, the number of arguments is fixed.
- if (instr->elements()->IsRegister()) {
- __ cmpq(rbp, ToRegister(instr->elements()));
- } else {
- __ cmpq(rbp, ToOperand(instr->elements()));
- }
- __ movl(result, Immediate(scope()->num_parameters()));
- __ j(equal, &done, Label::kNear);
-
- // Arguments adaptor frame present. Get argument length from there.
- __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ SmiToInteger32(result,
- Operand(result,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- // Argument length is in result register.
- __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
-
- // If the receiver is null or undefined, we have to pass the global
- // object as a receiver to normal functions. Values have to be
- // passed unchanged to builtins and strict-mode functions.
- Label global_object, receiver_ok;
-
- // Do not transform the receiver to object for strict mode
- // functions.
- __ movq(kScratchRegister,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &receiver_ok, Label::kNear);
-
- // Do not transform the receiver to object for builtins.
- __ testb(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &receiver_ok, Label::kNear);
-
- // Normal function. Replace undefined or null with global receiver.
- __ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ j(equal, &global_object, Label::kNear);
- __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
- __ j(equal, &global_object, Label::kNear);
-
- // The receiver should be a JS object.
- Condition is_smi = __ CheckSmi(receiver);
- DeoptimizeIf(is_smi, instr->environment());
- __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
- DeoptimizeIf(below, instr->environment());
- __ jmp(&receiver_ok, Label::kNear);
-
- __ bind(&global_object);
- // TODO(kmillikin): We have a hydrogen value for the global object. See
- // if it's better to use it than to explicitly fetch it from the context
- // here.
- __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
- __ movq(receiver,
- FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
- __ bind(&receiver_ok);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Register receiver = ToRegister(instr->receiver());
- Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
- ASSERT(receiver.is(rax)); // Used for parameter count.
- ASSERT(function.is(rdi)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(rax));
-
- // Copy the arguments to this function possibly from the
- // adaptor frame below it.
- const uint32_t kArgumentsLimit = 1 * KB;
- __ cmpq(length, Immediate(kArgumentsLimit));
- DeoptimizeIf(above, instr->environment());
-
- __ push(receiver);
- __ movq(receiver, length);
-
- // Loop through the arguments pushing them onto the execution
- // stack.
- Label invoke, loop;
- // length is a small non-negative integer, due to the test above.
- __ testl(length, length);
- __ j(zero, &invoke, Label::kNear);
- __ bind(&loop);
- __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
- __ decl(length);
- __ j(not_zero, &loop);
-
- // Invoke the function.
- __ bind(&invoke);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount actual(rax);
- __ InvokeFunction(function, actual, CALL_FUNCTION,
- safepoint_generator, CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->value();
- EmitPushTaggedOperand(argument);
-}
-
-
-void LCodeGen::DoDrop(LDrop* instr) {
- __ Drop(instr->count());
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
- Register result = ToRegister(instr->result());
- __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
- Register result = ToRegister(instr->result());
- __ movq(result, rsi);
-}
-
-
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
- Register context = ToRegister(instr->context());
- Register result = ToRegister(instr->result());
- __ movq(result,
- Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
- __ push(rsi); // The context is the first argument.
- __ PushHeapObject(instr->hydrogen()->pairs());
- __ Push(Smi::FromInt(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Register result = ToRegister(instr->result());
- __ movq(result, instr->qml_global()
- ? QmlGlobalObjectOperand()
- : GlobalObjectOperand());
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
- Register global = ToRegister(instr->global());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- RDIState rdi_state) {
- bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
- function->shared()->formal_parameter_count() == arity;
-
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
-
- if (can_invoke_directly) {
- if (rdi_state == RDI_UNINITIALIZED) {
- __ LoadHeapObject(rdi, function);
- }
-
- // Change context.
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Set rax to arguments count if adaption is not needed. Assumes that rax
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ Set(rax, arity);
- }
-
- // Invoke function.
- __ SetCallKind(rcx, call_kind);
- if (*function == *info()->closure()) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- }
-
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
- } else {
- // We need to adapt arguments.
- SafepointGenerator generator(
- this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(arity);
- __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
- }
-
- // Restore context.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- CallKnownFunction(instr->function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- RDI_UNINITIALIZED);
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->value());
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
-
- Label done;
- Register tmp = input_reg.is(rax) ? rcx : rax;
- Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label negative;
- __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| will be restored
- // unchanged by popping safepoint registers.
- __ testl(tmp, Immediate(HeapNumber::kSignMask));
- __ j(not_zero, &negative);
- __ jmp(&done);
-
- __ bind(&negative);
-
- Label allocated, slow;
- __ AllocateHeapNumber(tmp, tmp2, &slow);
- __ jmp(&allocated);
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- // Set the pointer to the new heap number in tmp.
- if (!tmp.is(rax)) {
- __ movq(tmp, rax);
- }
-
- // Restore input_reg after call to runtime.
- __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
-
- __ bind(&allocated);
- __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ shl(tmp2, Immediate(1));
- __ shr(tmp2, Immediate(1));
- __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
- __ StoreToSafepointRegisterSlot(input_reg, tmp);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->value());
- __ testl(input_reg, input_reg);
- Label is_positive;
- __ j(not_sign, &is_positive);
- __ negl(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr->environment());
- __ bind(&is_positive);
-}
-
-
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- // Class for deferred case.
- class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
- public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LUnaryMathOperation* instr_;
- };
-
- ASSERT(instr->value()->Equals(instr->result()));
- Representation r = instr->hydrogen()->value()->representation();
-
- if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ xorps(scratch, scratch);
- __ subsd(scratch, input_reg);
- __ andpd(input_reg, scratch);
- } else if (r.IsInteger32()) {
- EmitIntegerMathAbs(instr);
- } else { // Tagged case.
- DeferredMathAbsTaggedHeapNumber* deferred =
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input_reg = ToRegister(instr->value());
- // Smi check.
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiToInteger32(input_reg, input_reg);
- EmitIntegerMathAbs(instr);
- __ Integer32ToSmi(input_reg, input_reg);
- __ bind(deferred->exit());
- }
-}
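// Illustrative sketch, not part of the original file: the two abs strategies
// used above, in plain C++. A double's magnitude is obtained by clearing the
// sign bit (the andpd with 0 - x above has the same effect, since x and -x
// differ only in that bit), while the int32 path must deoptimize on INT32_MIN
// because its negation does not fit in 32 bits.
#include <cstdint>
#include <cstring>

static double DoubleAbs(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= ~(static_cast<uint64_t>(1) << 63);  // Clear only the sign bit.
  std::memcpy(&x, &bits, sizeof(x));
  return x;
}

static bool Int32Abs(int32_t input, int32_t* result) {
  if (input == INT32_MIN) return false;  // -INT32_MIN overflows; deopt case above.
  *result = input < 0 ? -input : input;
  return true;
}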
-
-
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
-
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope(SSE4_1);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Deoptimize if minus zero.
- __ movq(output_reg, input_reg);
- __ subq(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
- }
- __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
- __ cvttsd2si(output_reg, xmm_scratch);
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
- } else {
- Label negative_sign, done;
- // Deoptimize on unordered.
- __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr->environment());
- __ j(below, &negative_sign, Label::kNear);
-
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Check for negative zero.
- Label positive_sign;
- __ j(above, &positive_sign, Label::kNear);
- __ movmskpd(output_reg, input_reg);
- __ testq(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ Set(output_reg, 0);
- __ jmp(&done);
- __ bind(&positive_sign);
- }
-
- // Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, input_reg);
- // Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
- __ jmp(&done, Label::kNear);
-
- // Non-zero negative reaches here.
- __ bind(&negative_sign);
- // Truncate, then compare and compensate.
- __ cvttsd2si(output_reg, input_reg);
- __ cvtlsi2sd(xmm_scratch, output_reg);
- __ ucomisd(input_reg, xmm_scratch);
- __ j(equal, &done, Label::kNear);
- __ subl(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr->environment());
-
- __ bind(&done);
- }
-}
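// Illustrative sketch, not part of the original file: the compensation step in
// the non-SSE4.1 path above. cvttsd2si truncates towards zero, so for negative,
// non-integral inputs the truncated value is one too large and is decremented.
// NaN, -0 and out-of-int32-range inputs are deoptimization cases above and are
// not handled here.
static int32_t FloorToInt32(double x) {
  int32_t truncated = static_cast<int32_t>(x);        // Rounds towards zero.
  if (x < 0 && static_cast<double>(truncated) != x) {
    --truncated;                                      // Compensate downwards.
  }
  return truncated;
}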
-
-
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- const XMMRegister xmm_scratch = xmm0;
- Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
- static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
-
- Label done, round_to_zero, below_one_half, do_not_compensate, restore;
- __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_one_half);
-
- // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
- __ addsd(xmm_scratch, input_reg);
- __ cvttsd2si(output_reg, xmm_scratch);
- // Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
- __ jmp(&done);
-
- __ bind(&below_one_half);
- __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(xmm_scratch, input_reg);
- __ j(below_equal, &round_to_zero);
-
- // CVTTSD2SI rounds towards zero, so we use ceil(x - (-0.5)) and then
- // compare and compensate.
- __ movq(kScratchRegister, input_reg); // Back up input_reg.
- __ subsd(input_reg, xmm_scratch);
- __ cvttsd2si(output_reg, input_reg);
- // Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmpl(output_reg, Immediate(0x80000000));
- __ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
-
- __ cvtlsi2sd(xmm_scratch, output_reg);
- __ ucomisd(input_reg, xmm_scratch);
- __ j(equal, &restore, Label::kNear);
- __ subl(output_reg, Immediate(1));
- // No overflow because we already ruled out minint.
- __ bind(&restore);
- __ movq(input_reg, kScratchRegister); // Restore input_reg.
- __ jmp(&done);
-
- __ bind(&round_to_zero);
- // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
- // we can ignore the difference between a result of -0 and +0.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ movq(output_reg, input_reg);
- __ testq(output_reg, output_reg);
- __ RecordComment("Minus zero");
- DeoptimizeIf(negative, instr->environment());
- }
- __ Set(output_reg, 0);
- __ bind(&done);
-}
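// Illustrative sketch, not part of the original file: the rounding rule
// implemented by DoMathRound is ECMAScript Math.round, i.e. "round half
// towards +infinity", with a -0 result for negative inputs that round to
// zero. Results outside int32 range deoptimize in the code above.
#include <cmath>

static double EcmaRound(double x) {
  if (x != x) return x;                                // NaN (deopt case above).
  if (x >= 0.5) return std::floor(x + 0.5);            // floor(0.5 + x) branch.
  if (x >= -0.5) return std::signbit(x) ? -0.0 : 0.0;  // Round-to-zero branch.
  return std::floor(x + 0.5);                          // Truncate-and-compensate branch.
}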
-
-
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
- __ sqrtsd(input_reg, input_reg);
-}
-
-
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
-
- // Note that according to ECMA-262 15.8.2.13:
- // Math.pow(-Infinity, 0.5) == Infinity
- // Math.sqrt(-Infinity) == NaN
- Label done, sqrt;
- // Check base for -Infinity. According to IEEE-754, double-precision
- // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
- __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(xmm_scratch, input_reg);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &sqrt, Label::kNear);
- __ j(carry, &sqrt, Label::kNear);
- // If input is -Infinity, return Infinity.
- __ xorps(input_reg, input_reg);
- __ subsd(input_reg, xmm_scratch);
- __ jmp(&done, Label::kNear);
-
- // Square root.
- __ bind(&sqrt);
- __ xorps(xmm_scratch, xmm_scratch);
- __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
- __ sqrtsd(input_reg, input_reg);
- __ bind(&done);
-}
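// Illustrative sketch, not part of the original file: the special cases handled
// above before falling back to sqrtsd. Per ES5 15.8.2.13, Math.pow(-Infinity,
// 0.5) is +Infinity, whereas sqrt(-Infinity) would be NaN; adding +0 first also
// turns a -0 base into +0, so the result for -0 is +0.
#include <cmath>
#include <limits>

static double PowHalf(double base) {
  if (base == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();
  }
  return std::sqrt(base + 0.0);  // -0 + 0.0 == +0 under round-to-nearest.
}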
-
-
-void LCodeGen::DoPower(LPower* instr) {
- Representation exponent_type = instr->hydrogen()->right()->representation();
- // Having marked this as a call, we can use any registers.
- // Just make sure that the input/output registers are the expected ones.
-
- // Choose register conforming to calling convention (when bailing out).
-#ifdef _WIN64
- Register exponent = rdx;
-#else
- Register exponent = rdi;
-#endif
- ASSERT(!instr->right()->IsRegister() ||
- ToRegister(instr->right()).is(exponent));
- ASSERT(!instr->right()->IsDoubleRegister() ||
- ToDoubleRegister(instr->right()).is(xmm1));
- ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
- ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
-
- if (exponent_type.IsTagged()) {
- Label no_deopt;
- __ JumpIfSmi(exponent, &no_deopt);
- __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&no_deopt);
- MathPowStub stub(MathPowStub::TAGGED);
- __ CallStub(&stub);
- } else if (exponent_type.IsInteger32()) {
- MathPowStub stub(MathPowStub::INTEGER);
- __ CallStub(&stub);
- } else {
- ASSERT(exponent_type.IsDouble());
- MathPowStub stub(MathPowStub::DOUBLE);
- __ CallStub(&stub);
- }
-}
-
-
-void LCodeGen::DoRandom(LRandom* instr) {
- class DeferredDoRandom: public LDeferredCode {
- public:
- DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LRandom* instr_;
- };
-
- DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
-
- // Having marked this instruction as a call we can use any
- // registers.
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-
- // Choose the right register for the first argument depending on
- // calling convention.
-#ifdef _WIN64
- ASSERT(ToRegister(instr->global_object()).is(rcx));
- Register global_object = rcx;
-#else
- ASSERT(ToRegister(instr->global_object()).is(rdi));
- Register global_object = rdi;
-#endif
-
- static const int kSeedSize = sizeof(uint32_t);
- STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
-
- __ movq(global_object,
- FieldOperand(global_object, GlobalObject::kNativeContextOffset));
- static const int kRandomSeedOffset =
- FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
- __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
- // rbx: FixedArray of the native context's random seeds
-
- // Load state[0].
- __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
- // If state[0] == 0, call runtime to initialize seeds.
- __ testl(rax, rax);
- __ j(zero, deferred->entry());
- // Load state[1].
- __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
-
- // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
- // Only operate on the lower 32 bit of rax.
- __ movzxwl(rdx, rax);
- __ imull(rdx, rdx, Immediate(18273));
- __ shrl(rax, Immediate(16));
- __ addl(rax, rdx);
- // Save state[0].
- __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
-
- // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movzxwl(rdx, rcx);
- __ imull(rdx, rdx, Immediate(36969));
- __ shrl(rcx, Immediate(16));
- __ addl(rcx, rdx);
- // Save state[1].
- __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
-
- // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
- __ shll(rax, Immediate(14));
- __ andl(rcx, Immediate(0x3FFFF));
- __ addl(rax, rcx);
-
- __ bind(deferred->exit());
- // Convert 32 random bits in rax to 0.(32 random bits) in a double
- // by computing:
- // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
- __ movq(rcx, V8_INT64_C(0x4130000000000000),
- RelocInfo::NONE64); // 1.0 x 2^20 as double
- __ movq(xmm2, rcx);
- __ movd(xmm1, rax);
- __ xorps(xmm1, xmm2);
- __ subsd(xmm1, xmm2);
-}
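// Illustrative sketch, not part of the original file: the same generator in
// plain C++. Two small multiply-with-carry streams are combined into 32 random
// bits, the bits are placed in the low mantissa bits of 1.0 x 2^20, and
// subtracting 1.0 x 2^20 again leaves random_bits * 2^-32, a double in [0, 1).
#include <cstdint>
#include <cstring>

static double NextRandom(uint32_t* state0, uint32_t* state1) {
  *state0 = 18273 * (*state0 & 0xFFFF) + (*state0 >> 16);
  *state1 = 36969 * (*state1 & 0xFFFF) + (*state1 >> 16);
  uint32_t random_bits = (*state0 << 14) + (*state1 & 0x3FFFF);

  const uint64_t kOneTimesTwoPow20 = 0x4130000000000000ULL;  // 1.0 x 2^20 as a double.
  uint64_t pattern = kOneTimesTwoPow20 | random_bits;        // Bits go into the mantissa.
  double with_bits, base;
  std::memcpy(&with_bits, &pattern, sizeof(with_bits));
  std::memcpy(&base, &kOneTimesTwoPow20, sizeof(base));
  return with_bits - base;
}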
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
- __ PrepareCallCFunction(1);
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // Return value is in rax.
-}
-
-
-void LCodeGen::DoMathExp(LMathExp* instr) {
- XMMRegister input = ToDoubleRegister(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
-}
-
-
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::TAN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::COS,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::SIN,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- ASSERT(ToRegister(instr->function()).is(rdi));
- ASSERT(instr->HasPointerMap());
-
- if (instr->known_function().is_null()) {
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
- ParameterCount count(instr->arity());
- __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- } else {
- CallKnownFunction(instr->known_function(),
- instr->arity(),
- instr,
- CALL_AS_METHOD,
- RDI_CONTAINS_TARGET);
- }
-}
-
-
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- ASSERT(ToRegister(instr->key()).is(rcx));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ Move(rcx, instr->name());
- CallCode(ic, mode, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
- ASSERT(ToRegister(instr->function()).is(rdi));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- int arity = instr->arity();
- CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- int arity = instr->arity();
- RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
- Handle<Code> ic =
- isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
- __ Move(rcx, instr->name());
- CallCode(ic, mode, instr);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
- ASSERT(ToRegister(instr->result()).is(rax));
- CallKnownFunction(instr->target(),
- instr->arity(),
- instr,
- CALL_AS_FUNCTION,
- RDI_UNINITIALIZED);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->constructor()).is(rdi));
- ASSERT(ToRegister(instr->result()).is(rax));
-
- __ Set(rax, instr->arity());
- if (FLAG_optimize_constructed_arrays) {
- // No cell in rbx for construct type feedback in optimized code.
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_value);
- }
- CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
- ASSERT(ToRegister(instr->constructor()).is(rdi));
- ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(FLAG_optimize_constructed_arrays);
-
- __ Set(rax, instr->arity());
- __ Move(rbx, instr->hydrogen()->property_cell());
- Handle<Code> array_construct_code =
- isolate()->builtins()->ArrayConstructCode();
- CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
- Register object = ToRegister(instr->object());
- Register value = ToRegister(instr->value());
- int offset = instr->offset();
-
- if (!instr->transition().is_null()) {
- if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ Move(FieldOperand(object, HeapObject::kMapOffset),
- instr->transition());
- } else {
- Register temp = ToRegister(instr->temp());
- __ Move(kScratchRegister, instr->transition());
- __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
- // Update the write barrier for the map field.
- __ RecordWriteField(object,
- HeapObject::kMapOffset,
- kScratchRegister,
- temp,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
- }
-
- // Do the store.
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (instr->is_in_object()) {
- __ movq(FieldOperand(object, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register temp = ToRegister(instr->temp());
- // Update the write barrier for the object for in-object properties.
- __ RecordWriteField(object,
- offset,
- value,
- temp,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- } else {
- Register temp = ToRegister(instr->temp());
- __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(temp, offset), value);
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- // Update the write barrier for the properties array.
- // object is used as a scratch register.
- __ RecordWriteField(temp,
- offset,
- value,
- object,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
- }
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(rdx));
- ASSERT(ToRegister(instr->value()).is(rax));
-
- __ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->StoreIC_Initialize_Strict()
- : isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- if (instr->hydrogen()->skip_check()) return;
-
- if (instr->length()->IsRegister()) {
- Register reg = ToRegister(instr->length());
- if (!instr->hydrogen()->length()->representation().IsTagged()) {
- __ AssertZeroExtended(reg);
- }
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
- __ Cmp(reg, Smi::FromInt(constant_index));
- } else {
- __ cmpq(reg, Immediate(constant_index));
- }
- } else {
- Register reg2 = ToRegister(instr->index());
- if (!instr->hydrogen()->index()->representation().IsTagged()) {
- __ AssertZeroExtended(reg2);
- }
- __ cmpq(reg, reg2);
- }
- } else {
- Operand length = ToOperand(instr->length());
- if (instr->index()->IsConstantOperand()) {
- int constant_index =
- ToInteger32(LConstantOperand::cast(instr->index()));
- if (instr->hydrogen()->length()->representation().IsTagged()) {
- __ Cmp(length, Smi::FromInt(constant_index));
- } else {
- __ cmpq(length, Immediate(constant_index));
- }
- } else {
- __ cmpq(length, ToRegister(instr->index()));
- }
- }
- DeoptimizeIf(below_equal, instr->environment());
-}
-
-
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend the key because it could be a negative 32-bit value
- // and the dehoisted address computation happens in 64 bits.
- __ movsxlq(key_reg, key_reg);
- }
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- elements_kind,
- 0,
- instr->additional_index()));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister value(ToDoubleRegister(instr->value()));
- __ cvtsd2ss(value, value);
- __ movss(operand, value);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(operand, ToDoubleRegister(instr->value()));
- } else {
- Register value(ToRegister(instr->value()));
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(operand, value);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(operand, value);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(operand, value);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- XMMRegister value = ToDoubleRegister(instr->value());
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend the key because it could be a negative 32-bit value
- // and the dehoisted address computation happens in 64 bits.
- __ movsxlq(key_reg, key_reg);
- }
- }
-
- if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value); // Jump if not NaN; NaN is canonicalized below.
-
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- __ movq(value, kScratchRegister);
-
- __ bind(&have_value);
- }
-
- Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- key,
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
-
- __ movsd(double_store_operand, value);
-}
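// Illustrative sketch, not part of the original file: why the store above
// canonicalizes NaNs. Fast double arrays mark missing entries ("holes") with a
// dedicated NaN bit pattern, so any other NaN being stored is first rewritten
// to a canonical quiet NaN; the concrete bit pattern below is an assumption
// for illustration only.
#include <cstdint>
#include <cstring>

static double CanonicalizeNaN(double value) {
  if (value != value) {                                    // Only NaN != NaN.
    const uint64_t kCanonicalNaN = 0x7FF8000000000000ULL;  // Assumed quiet-NaN pattern.
    std::memcpy(&value, &kCanonicalNaN, sizeof(value));
  }
  return value;
}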
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend the key because it could be a negative 32-bit value
- // and the dehoisted address computation happens in 64 bits.
- __ movsxlq(key_reg, key_reg);
- }
- }
-
- Operand operand =
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- ASSERT(!instr->key()->IsConstantOperand());
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- Register key_reg(ToRegister(key));
- __ lea(key_reg, operand);
- __ movq(Operand(key_reg, 0), value);
- __ RecordWrite(elements,
- key_reg,
- value,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- } else {
- __ movq(operand, value);
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- if (instr->is_external()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- ASSERT(ToRegister(instr->object()).is(rdx));
- ASSERT(ToRegister(instr->key()).is(rcx));
- ASSERT(ToRegister(instr->value()).is(rax));
-
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
- Register object_reg = ToRegister(instr->object());
-
- Handle<Map> from_map = instr->original_map();
- Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = instr->from_kind();
- ElementsKind to_kind = instr->to_kind();
-
- Label not_applicable;
- __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
- __ j(not_equal, &not_applicable);
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- Register new_map_reg = ToRegister(instr->new_map_temp());
- __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
- // Write barrier.
- ASSERT_NE(instr->temp(), NULL);
- __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- ToRegister(instr->temp()), kDontSaveFPRegs);
- } else if (FLAG_compiled_transitions) {
- PushSafepointRegistersScope scope(this);
- if (!object_reg.is(rax)) {
- __ movq(rax, object_reg);
- }
- __ Move(rbx, to_map);
- TransitionElementsKindStub stub(from_kind, to_kind);
- __ CallStub(&stub);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(rdx));
- Register new_map_reg = ToRegister(instr->new_map_temp());
- ASSERT(new_map_reg.is(rbx));
- __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- __ movq(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(rdx));
- Register new_map_reg = ToRegister(instr->new_map_temp());
- ASSERT(new_map_reg.is(rbx));
- __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
- __ movq(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
- } else {
- UNREACHABLE();
- }
- __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
- Register object = ToRegister(instr->object());
- Register temp = ToRegister(instr->temp());
- __ TestJSArrayForAllocationSiteInfo(object, temp);
- DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- EmitPushTaggedOperand(instr->left());
- EmitPushTaggedOperand(instr->right());
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
- class DeferredStringCharCodeAt: public LDeferredCode {
- public:
- DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharCodeAt* instr_;
- };
-
- DeferredStringCharCodeAt* deferred =
- new(zone()) DeferredStringCharCodeAt(this, instr);
-
- StringCharLoadGenerator::Generate(masm(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->result()),
- deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ push(string);
- // Push the index as a smi. This is safe because of the checks in
- // DoStringCharCodeAt above.
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
- if (instr->index()->IsConstantOperand()) {
- int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
- __ Push(Smi::FromInt(const_index));
- } else {
- Register index = ToRegister(instr->index());
- __ Integer32ToSmi(index, index);
- __ push(index);
- }
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- __ AssertSmi(rax);
- __ SmiToInteger32(rax, rax);
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
- class DeferredStringCharFromCode: public LDeferredCode {
- public:
- DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStringCharFromCode* instr_;
- };
-
- DeferredStringCharFromCode* deferred =
- new(zone()) DeferredStringCharFromCode(this, instr);
-
- ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
- ASSERT(!char_code.is(result));
-
- __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
- __ j(above, deferred->entry());
- __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ movq(result, FieldOperand(result,
- char_code, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
- __ j(equal, deferred->entry());
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
- Register char_code = ToRegister(instr->char_code());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ Integer32ToSmi(char_code, char_code);
- __ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
-void LCodeGen::DoStringLength(LStringLength* instr) {
- Register string = ToRegister(instr->string());
- Register result = ToRegister(instr->result());
- __ movq(result, FieldOperand(string, String::kLengthOffset));
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- if (input->IsRegister()) {
- __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
- } else {
- __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
- }
-}
-
-
-void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- LOperand* temp = instr->temp();
-
- __ LoadUint32(ToDoubleRegister(output),
- ToRegister(input),
- ToDoubleRegister(temp));
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- __ Integer32ToSmi(reg, reg);
-}
-
-
-void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
- class DeferredNumberTagU: public LDeferredCode {
- public:
- DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredNumberTagU(instr_);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagU* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
-
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
- __ cmpl(reg, Immediate(Smi::kMaxValue));
- __ j(above, deferred->entry());
- __ Integer32ToSmi(reg, reg);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
- Label slow;
- Register reg = ToRegister(instr->value());
- Register tmp = reg.is(rax) ? rcx : rax;
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- Label done;
- // Load value into xmm1 which will be preserved across potential call to
- // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
- // XMM registers on x64).
- __ LoadUint32(xmm1, reg, xmm0);
-
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, &slow);
- __ jmp(&done, Label::kNear);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
-
- // Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- if (!reg.is(rax)) __ movq(reg, rax);
-
- // Done. Put the value in xmm1 into the value of the allocated heap
- // number.
- __ bind(&done);
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm1);
- __ StoreToSafepointRegisterSlot(reg, reg);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- class DeferredNumberTagD: public LDeferredCode {
- public:
- DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LNumberTagD* instr_;
- };
-
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->temp());
-
- bool convert_hole = false;
- HValue* change_input = instr->hydrogen()->value();
- if (change_input->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(change_input);
- convert_hole = load->UsesMustHandleHole();
- }
-
- Label no_special_nan_handling;
- Label done;
- if (convert_hole) {
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ ucomisd(input_reg, input_reg);
- __ j(parity_odd, &no_special_nan_handling);
- __ subq(rsp, Immediate(kDoubleSize));
- __ movsd(MemOperand(rsp, 0), input_reg);
- __ cmpl(MemOperand(rsp, sizeof(kHoleNanLower32)),
- Immediate(kHoleNanUpper32));
- Label canonicalize;
- __ j(not_equal, &canonicalize);
- __ addq(rsp, Immediate(kDoubleSize));
- __ Move(reg, factory()->the_hole_value());
- __ jmp(&done);
- __ bind(&canonicalize);
- __ addq(rsp, Immediate(kDoubleSize));
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- __ movq(input_reg, kScratchRegister);
- }
-
- __ bind(&no_special_nan_handling);
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(reg, tmp, deferred->entry());
- } else {
- __ jmp(deferred->entry());
- }
- __ bind(deferred->exit());
- __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- Register reg = ToRegister(instr->result());
- __ Move(reg, Smi::FromInt(0));
-
- {
- PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- // Ensure that value in rax survives popping registers.
- __ movq(kScratchRegister, rax);
- }
- __ movq(reg, kScratchRegister);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(instr->value()->Equals(instr->result()));
- Register input = ToRegister(instr->value());
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ Integer32ToSmi(input, input);
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- ASSERT(instr->value()->Equals(instr->result()));
- Register input = ToRegister(instr->value());
- if (instr->needs_check()) {
- Condition is_smi = __ CheckSmi(input);
- DeoptimizeIf(NegateCondition(is_smi), instr->environment());
- } else {
- __ AssertSmi(input);
- }
- __ SmiToInteger32(input, input);
-}
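// Illustrative sketch, not part of the original file: the x64 smi encoding
// assumed by Integer32ToSmi / SmiToInteger32 above. The 32-bit payload lives
// in the upper half of the 64-bit word, so tagging is a left shift by 32 and
// untagging an arithmetic right shift by 32 (two's complement assumed).
#include <cstdint>

static int64_t TagSmi(int32_t value) {
  // Shift in the unsigned domain to avoid shifting a negative signed value.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << 32);
}

static int32_t UntagSmi(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);
}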
-
-
-void LCodeGen::EmitNumberUntagD(Register input_reg,
- XMMRegister result_reg,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode) {
- Label load_smi, done;
-
- if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
-
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- if (deoptimize_on_undefined) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number;
- __ j(equal, &heap_number, Label::kNear);
-
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
-
- // Convert undefined to NaN. Compute NaN as 0/0.
- __ xorps(result_reg, result_reg);
- __ divsd(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
- }
- // Heap number to XMM conversion.
- __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, result_reg);
- __ j(not_equal, &done, Label::kNear);
- __ movmskpd(kScratchRegister, result_reg);
- __ testq(kScratchRegister, Immediate(1));
- DeoptimizeIf(not_zero, env);
- }
- __ jmp(&done, Label::kNear);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
- __ testq(input_reg, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, env);
- } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
- __ testq(input_reg, Immediate(kSmiTagMask));
- __ j(zero, &load_smi);
- __ Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::hole_nan_as_double()));
- __ movq(result_reg, kScratchRegister);
- __ jmp(&done, Label::kNear);
- } else {
- ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
- }
-
- // Smi to XMM conversion
- __ bind(&load_smi);
- __ SmiToInteger32(kScratchRegister, input_reg);
- __ cvtlsi2sd(result_reg, kScratchRegister);
- __ bind(&done);
-}
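// Illustrative sketch, not part of the original file: the two small tricks in
// EmitNumberUntagD. An undefined input is converted to NaN by computing 0/0,
// and -0 is detected by checking the sign bit of a value that compared equal
// to +0 (that is the bit movmskpd extracts above).
#include <cmath>

static double UndefinedToNaN() {
  double zero = 0.0;
  return zero / zero;  // 0/0 yields a quiet NaN.
}

static bool IsMinusZero(double x) {
  return x == 0.0 && std::signbit(x);  // Equal to zero, but sign bit set.
}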
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
- Label done, heap_number;
- Register input_reg = ToRegister(instr->value());
-
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
-
- if (instr->truncating()) {
- __ j(equal, &heap_number, Label::kNear);
- // Check for undefined. Undefined is converted to zero for truncating
- // conversions.
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
- __ Set(input_reg, 0);
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
-
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2siq(input_reg, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpq(input_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
- } else {
- // Deoptimize if we don't have a heap number.
- DeoptimizeIf(not_equal, instr->environment());
-
- XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ cvttsd2si(input_reg, xmm0);
- __ cvtlsi2sd(xmm_temp, input_reg);
- __ ucomisd(xmm0, xmm_temp);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(input_reg, input_reg);
- __ j(not_zero, &done);
- __ movmskpd(input_reg, xmm0);
- __ andl(input_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- }
- }
- __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- class DeferredTaggedToI: public LDeferredCode {
- public:
- DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LTaggedToI* instr_;
- };
-
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
-
- Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiToInteger32(input_reg, input_reg);
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsDoubleRegister());
-
- Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToDoubleRegister(result);
-
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
- HValue* value = instr->hydrogen()->value();
- if (value->type().IsSmi()) {
- if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
- }
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI;
- }
- }
- }
-
- EmitNumberUntagD(input_reg, result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
- instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment(),
- mode);
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsDoubleRegister());
- LOperand* result = instr->result();
- ASSERT(result->IsRegister());
-
- XMMRegister input_reg = ToDoubleRegister(input);
- Register result_reg = ToRegister(result);
-
- if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- __ cvttsd2siq(result_reg, input_reg);
- __ movq(kScratchRegister,
- V8_INT64_C(0x8000000000000000),
- RelocInfo::NONE64);
- __ cmpq(result_reg, kScratchRegister);
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ cvttsd2si(result_reg, input_reg);
- __ cvtlsi2sd(xmm0, result_reg);
- __ ucomisd(xmm0, input_reg);
- DeoptimizeIf(not_equal, instr->environment());
- DeoptimizeIf(parity_even, instr->environment()); // NaN.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label done;
- // The integer converted back is equal to the original. We
- // only have to test if we got -0 as an input.
- __ testl(result_reg, result_reg);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(result_reg, input_reg);
- // Bit 0 contains the sign of the double in input_reg.
- // If input was positive, we are ok and return 0, otherwise
- // deoptimize.
- __ andl(result_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr->environment());
- __ bind(&done);
- }
- }
-}
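// Illustrative sketch, not part of the original file: the non-truncating path
// above succeeds only when the double round-trips through int32 exactly (and,
// when kBailoutOnMinusZero is set, when it is not -0); everything else becomes
// a deoptimization rather than a silently wrong result.
#include <cmath>
#include <cstdint>

static bool DoubleToInt32Exact(double x, bool bailout_on_minus_zero,
                               int32_t* result) {
  if (!(x >= INT32_MIN && x <= INT32_MAX)) return false;  // NaN or out of range.
  int32_t truncated = static_cast<int32_t>(x);            // cvttsd2si-style truncation.
  if (static_cast<double>(truncated) != x) return false;  // Fractional input.
  if (bailout_on_minus_zero && truncated == 0 && std::signbit(x)) return false;
  *result = truncated;
  return true;
}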
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->value();
- Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(NegateCondition(cc), instr->environment());
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->value();
- Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(cc, instr->environment());
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->value());
-
- __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
-
- if (instr->hydrogen()->is_interval_check()) {
- InstanceType first;
- InstanceType last;
- instr->hydrogen()->GetCheckInterval(&first, &last);
-
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(first)));
-
- // If there is only one type in the interval check for equality.
- if (first == last) {
- DeoptimizeIf(not_equal, instr->environment());
- } else {
- DeoptimizeIf(below, instr->environment());
- // Omit check for the last type.
- if (last != LAST_TYPE) {
- __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(last)));
- DeoptimizeIf(above, instr->environment());
- }
- }
- } else {
- uint8_t mask;
- uint8_t tag;
- instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
- if (IsPowerOf2(mask)) {
- ASSERT(tag == 0 || IsPowerOf2(tag));
- __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
- Immediate(mask));
- DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
- } else {
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ andb(kScratchRegister, Immediate(mask));
- __ cmpb(kScratchRegister, Immediate(tag));
- DeoptimizeIf(not_equal, instr->environment());
- }
- }
-}
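// Illustrative sketch, not part of the original file: the shortcut taken above
// when the instance-type mask has a single bit set. In that case
// (type & mask) can only be 0 or mask, so comparing against the tag collapses
// into a plain zero / non-zero test (a single testb instruction).
#include <cstdint>

static bool MatchesMaskAndTag(uint8_t instance_type, uint8_t mask, uint8_t tag) {
  return (instance_type & mask) == tag;
}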
-
-
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- Register reg = ToRegister(instr->value());
- Handle<JSFunction> target = instr->hydrogen()->target();
- if (isolate()->heap()->InNewSpace(*target)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(target);
- __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ cmpq(reg, Operand(kScratchRegister, 0));
- } else {
- __ Cmp(reg, target);
- }
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapCommon(Register reg,
- Handle<Map> map,
- CompareMapMode mode,
- LInstruction* instr) {
- Label success;
- __ CompareMap(reg, map, &success, mode);
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&success);
-}
-
-
-void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- Register reg = ToRegister(input);
-
- Label success;
- SmallMapList* map_set = instr->hydrogen()->map_set();
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
- __ j(equal, &success);
- }
- Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
- __ bind(&success);
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
- Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- ASSERT(instr->unclamped()->Equals(instr->result()));
- Register value_reg = ToRegister(instr->result());
- __ ClampUint8(value_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- ASSERT(instr->unclamped()->Equals(instr->result()));
- Register input_reg = ToRegister(instr->unclamped());
- XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
- Label is_smi, done, heap_number;
-
- __ JumpIfSmi(input_reg, &is_smi);
-
- // Check for heap number
- __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
-
- // Check for undefined. Undefined is converted to zero for clamping
- // conversions.
- __ Cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr->environment());
- __ movq(input_reg, Immediate(0));
- __ jmp(&done, Label::kNear);
-
- // Heap number
- __ bind(&heap_number);
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg);
- __ jmp(&done, Label::kNear);
-
- // smi
- __ bind(&is_smi);
- __ SmiToInteger32(input_reg, input_reg);
- __ ClampUint8(input_reg);
-
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
- Register reg = ToRegister(instr->temp());
-
- ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
- ZoneList<Handle<Map> >* maps = instr->maps();
-
- ASSERT(prototypes->length() == maps->length());
-
- if (instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < maps->length(); i++) {
- prototype_maps_.Add(maps->at(i), info()->zone());
- }
- __ LoadHeapObject(reg, prototypes->at(prototypes->length() - 1));
- } else {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(reg, prototypes->at(i));
- DoCheckMapCommon(reg, maps->at(i), ALLOW_ELEMENT_TRANSITION_MAPS, instr);
- }
- }
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
- class DeferredAllocateObject: public LDeferredCode {
- public:
- DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocateObject* instr_;
- };
-
- DeferredAllocateObject* deferred =
- new(zone()) DeferredAllocateObject(this, instr);
-
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
- ASSERT(initial_map->pre_allocated_property_fields() +
- initial_map->unused_property_fields() -
- initial_map->inobject_properties() == 0);
-
- // Allocate memory for the object. The initial map might change when
- // the constructor's prototype changes, but instance size and property
- // counts remain unchanged (if slack tracking finished).
- ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- no_reg,
- scratch,
- deferred->entry(),
- TAG_OBJECT);
-
- __ bind(deferred->exit());
- if (FLAG_debug_code) {
- Label is_in_new_space;
- __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
- __ Abort("Allocated object is not in new-space");
- __ bind(&is_in_new_space);
- }
-
- // Load the initial map.
- Register map = scratch;
- __ LoadHeapObject(scratch, constructor);
- __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(map);
- __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
- Immediate(instance_size >> kPointerSizeLog2));
- __ Assert(equal, "Unexpected instance size");
- __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
- Immediate(initial_map->pre_allocated_property_fields()));
- __ Assert(equal, "Unexpected pre-allocated property fields count");
- __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
- Immediate(initial_map->unused_property_fields()));
- __ Assert(equal, "Unexpected unused property fields count");
- __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
- Immediate(initial_map->inobject_properties()));
- __ Assert(equal, "Unexpected in-object property fields count");
- }
-
- // Initialize map and fields of the newly allocated object.
- ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
- __ movq(FieldOperand(result, JSObject::kMapOffset), map);
- __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
- __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
- __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
- if (initial_map->inobject_properties() != 0) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < initial_map->inobject_properties(); i++) {
- int property_offset = JSObject::kHeaderSize + i * kPointerSize;
- __ movq(FieldOperand(result, property_offset), scratch);
- }
- }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
- Register result = ToRegister(instr->result());
- Handle<JSFunction> constructor = instr->hydrogen()->constructor();
- Handle<Map> initial_map(constructor->initial_map());
- int instance_size = initial_map->instance_size();
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ Push(Smi::FromInt(instance_size));
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
-void LCodeGen::DoAllocate(LAllocate* instr) {
- class DeferredAllocate: public LDeferredCode {
- public:
- DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LAllocate* instr_;
- };
-
- DeferredAllocate* deferred =
- new(zone()) DeferredAllocate(this, instr);
-
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->temp());
-
- // Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
- if (instr->hydrogen()->MustAllocateDoubleAligned()) {
- flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
- }
- if (instr->size()->IsConstantOperand()) {
- int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
- } else {
- Register size = ToRegister(instr->size());
- __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
- }
-
- __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
- Register result = ToRegister(instr->result());
-
- // TODO(3095996): Get rid of this. For now, we need to make the
- // result register contain a valid pointer because it is already
- // contained in the register pointer map.
- __ Set(result, 0);
-
- PushSafepointRegistersScope scope(this);
- __ Integer32ToSmi(size, size);
- __ push(size);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
- __ StoreToSafepointRegisterSlot(result, rax);
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate_elements_kind();
- AllocationSiteMode allocation_site_mode =
- instr->hydrogen()->allocation_site_mode();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(rbx, Immediate(Map::kElementsKindMask));
- __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
- Map::kElementsKindShift));
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Set up the parameters to the stub/runtime call.
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- // Boilerplate already exists; constant elements are never accessed.
- // Pass an empty fixed array.
- __ Push(isolate()->factory()->empty_fixed_array());
-
- // Pick the right runtime function or stub to call.
- int length = instr->hydrogen()->length();
- if (instr->hydrogen()->IsCopyOnWrite()) {
- ASSERT(instr->hydrogen()->depth() == 1);
- FastCloneShallowArrayStub::Mode mode =
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
- } else {
- FastCloneShallowArrayStub::Mode mode =
- boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode) {
- ASSERT(!source.is(rcx));
- ASSERT(!result.is(rcx));
-
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- object->map()->CanTrackAllocationSite();
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_size = has_elements ? elements->Size() : 0;
- int elements_offset = *offset + object_size;
- if (create_allocation_site_info) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- }
-
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
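- // The header is everything that comes before the in-object properties.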
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ lea(rcx, Operand(result, elements_offset));
- } else {
- __ movq(rcx, FieldOperand(source, i));
- }
- __ movq(FieldOperand(result, object_offset + i), rcx);
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
- isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(rcx, Operand(result, *offset));
- __ movq(FieldOperand(result, total_offset), rcx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
- __ movq(FieldOperand(result, total_offset), rcx);
- } else {
- __ movq(rcx, value, RelocInfo::NONE64);
- __ movq(FieldOperand(result, total_offset), rcx);
- }
- }
-
- // Build Allocation Site Info if desired
- if (create_allocation_site_info) {
- __ LoadRoot(kScratchRegister, Heap::kAllocationSiteInfoMapRootIndex);
- __ movq(FieldOperand(result, object_size), kScratchRegister);
- __ movq(FieldOperand(result, object_size + kPointerSize), source);
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ movq(rcx, FieldOperand(source, i));
- __ movq(FieldOperand(result, elements_offset + i), rcx);
- }
-
- // Copy elements backing store content.
- int elements_length = elements->length();
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ movq(rcx, value, RelocInfo::NONE64);
- __ movq(FieldOperand(result, total_offset), rcx);
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i), isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(rcx, Operand(result, *offset));
- __ movq(FieldOperand(result, total_offset), rcx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
- __ movq(FieldOperand(result, total_offset), rcx);
- } else {
- __ movq(rcx, value, RelocInfo::NONE64);
- __ movq(FieldOperand(result, total_offset), rcx);
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
- __ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ movb(rcx, FieldOperand(rcx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(rcx, Immediate(Map::kElementsKindMask));
- __ cmpb(rcx, Immediate(boilerplate_elements_kind <<
- Map::kElementsKindShift));
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset,
- instr->hydrogen()->allocation_site_mode());
- ASSERT_EQ(size, offset);
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
- Handle<FixedArray> literals(instr->environment()->closure()->literals());
- Handle<FixedArray> constant_properties =
- instr->hydrogen()->constant_properties();
-
- int flags = instr->hydrogen()->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= instr->hydrogen()->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
-
- // Set up the parameters to the stub/runtime call and pick the right
- // runtime function or stub to call.
- int properties_count = constant_properties->length() / 2;
- if (instr->hydrogen()->depth() > 1) {
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else if (flags != ObjectLiteral::kFastElements ||
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
- } else {
- __ LoadHeapObject(rax, literals);
- __ Move(rbx, Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Move(rcx, constant_properties);
- __ Move(rdx, Smi::FromInt(flags));
- FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- }
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->value()).is(rax));
- __ push(rax);
- CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- Label materialized;
- // Registers will be used as follows:
- // rcx = literals array.
- // rbx = regexp literal.
- // rax = regexp literal clone.
- int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(rcx, instr->hydrogen()->literals());
- __ movq(rbx, FieldOperand(rcx, literal_offset));
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create the regexp literal using the runtime function.
- // The result will be in rax.
- __ push(rcx);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->pattern());
- __ Push(instr->hydrogen()->flags());
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ movq(rbx, rax);
-
- __ bind(&materialized);
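- // Allocate space for a copy of the regexp object, including its in-object fields.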
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(rbx);
- __ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(rbx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movq(rdx, FieldOperand(rbx, i));
- __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movq(FieldOperand(rax, i), rdx);
- __ movq(FieldOperand(rax, i + kPointerSize), rcx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movq(FieldOperand(rax, size - kPointerSize), rdx);
- }
-}
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- Handle<SharedFunctionInfo> shared_info = instr->shared_info();
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
- __ Push(shared_info);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- } else {
- __ push(rsi);
- __ Push(shared_info);
- __ PushRoot(pretenure ?
- Heap::kTrueValueRootIndex :
- Heap::kFalseValueRootIndex);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
- LOperand* input = instr->value();
- EmitPushTaggedOperand(input);
- CallRuntime(Runtime::kTypeof, 1, instr);
-}
-
-
-void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
- ASSERT(!operand->IsDoubleRegister());
- if (operand->IsConstantOperand()) {
- Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
- if (object->IsSmi()) {
- __ Push(Handle<Smi>::cast(object));
- } else {
- __ PushHeapObject(Handle<HeapObject>::cast(object));
- }
- } else if (operand->IsRegister()) {
- __ push(ToRegister(operand));
- } else {
- __ push(ToOperand(operand));
- }
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->value());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition final_branch_condition =
- EmitTypeofIs(true_label, false_label, input, instr->type_literal());
- if (final_branch_condition != no_condition) {
- EmitBranch(true_block, false_block, final_branch_condition);
- }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
- Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_string())) {
- __ JumpIfSmi(input, true_label);
- __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
-
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->string_string())) {
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
- __ j(above_equal, false_label);
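- // Undetectable objects report "undefined" for typeof, so exclude them here.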
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = zero;
-
- } else if (type_name->Equals(heap()->boolean_string())) {
- __ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ j(equal, true_label);
- __ CompareRoot(input, Heap::kFalseValueRootIndex);
- final_branch_condition = equal;
-
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->undefined_string())) {
- __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
- __ j(equal, true_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = not_zero;
-
- } else if (type_name->Equals(heap()->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
- __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
- final_branch_condition = equal;
-
- } else if (type_name->Equals(heap()->object_string())) {
- __ JumpIfSmi(input, false_label);
- if (!FLAG_harmony_typeof) {
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ j(equal, true_label);
- }
- if (FLAG_harmony_symbols) {
- __ CmpObjectType(input, SYMBOL_TYPE, input);
- __ j(equal, true_label);
- __ CmpInstanceType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- } else {
- __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
- }
- __ j(below, false_label);
- __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, false_label);
- // Check for undetectable objects => false.
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = zero;
-
- } else {
- __ jmp(false_label);
- }
-
- return final_branch_condition;
-}
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->temp());
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- EmitIsConstructCall(temp);
- EmitBranch(true_block, false_block, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker, Label::kNear);
- __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (info()->IsStub()) return;
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
- }
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(no_condition, instr->environment());
-}
-
-
-void LCodeGen::DoDummyUse(LDummyUse* instr) {
- // Nothing to see here, move on!
-}
-
-
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(obj);
- EmitPushTaggedOperand(key);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- // Create safepoint generator that will also ensure enough space in the
- // reloc info for patching in deoptimization (since this is invoking a
- // builtin)
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ Push(Smi::FromInt(strict_mode_flag()));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoIn(LIn* instr) {
- LOperand* obj = instr->object();
- LOperand* key = instr->key();
- EmitPushTaggedOperand(key);
- EmitPushTaggedOperand(obj);
- ASSERT(instr->HasPointerMap());
- LPointerMap* pointers = instr->pointer_map();
- RecordPosition(pointers->position());
- SafepointGenerator safepoint_generator(
- this, pointers, Safepoint::kLazyDeopt);
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
- PushSafepointRegistersScope scope(this);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
- RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- virtual LInstruction* instr() { return instr_; }
- private:
- LStackCheck* instr_;
- };
-
- ASSERT(instr->HasEnvironment());
- LEnvironment* env = instr->environment();
- // There is no LLazyBailout instruction for stack-checks. We have to
- // prepare for lazy deoptimization explicitly here.
- if (instr->hydrogen()->is_function_entry()) {
- // Perform stack overflow check.
- Label done;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &done, Label::kNear);
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
- __ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- } else {
- ASSERT(instr->hydrogen()->is_backwards_branch());
- // Perform stack overflow check if this goto needs it before jumping.
- DeferredStackCheck* deferred_stack_check =
- new(zone()) DeferredStackCheck(this, instr);
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
- last_lazy_deopt_pc_ = masm()->pc_offset();
- __ bind(instr->done_label());
- deferred_stack_check->SetExit(instr->done_label());
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- // Don't record a deoptimization index for the safepoint here.
- // This will be done explicitly when emitting the call and the safepoint in
- // the deferred code.
- }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- // This is a pseudo-instruction that ensures that the environment here is
- // properly registered for deoptimization and records the assembler's PC
- // offset.
- LEnvironment* environment = instr->environment();
- environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
- instr->SpilledDoubleRegisterArray());
-
- // If the environment were already registered, we would have no way of
- // backpatching it with the spill slot operands.
- ASSERT(!environment->HasBeenRegistered());
- RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
- ASSERT(osr_pc_offset_ == -1);
- osr_pc_offset_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
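- // The enumerable is expected in rax. Deoptimize for values that cannot use
- // the enum-cache fast path: undefined, null, smis and proxies.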
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
-
- Register null_value = rdi;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
- DeoptimizeIf(equal, instr->environment());
-
- Condition cc = masm()->CheckSmi(rax);
- DeoptimizeIf(cc, instr->environment());
-
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
- DeoptimizeIf(below_equal, instr->environment());
-
- Label use_cache, call_runtime;
- __ CheckEnumCache(null_value, &call_runtime);
-
- __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
- __ jmp(&use_cache, Label::kNear);
-
- // Get the set of properties to enumerate.
- __ bind(&call_runtime);
- __ push(rax);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kMetaMapRootIndex);
- DeoptimizeIf(not_equal, instr->environment());
- __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
- Register map = ToRegister(instr->map());
- Register result = ToRegister(instr->result());
- Label load_cache, done;
- __ EnumLength(result, map);
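- // A zero enum length means there is nothing to enumerate; use the empty
- // fixed array as the cache in that case.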
- __ Cmp(result, Smi::FromInt(0));
- __ j(not_equal, &load_cache);
- __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
- __ jmp(&done);
- __ bind(&load_cache);
- __ LoadInstanceDescriptors(map, result);
- __ movq(result,
- FieldOperand(result, DescriptorArray::kEnumCacheOffset));
- __ movq(result,
- FieldOperand(result, FixedArray::SizeFor(instr->idx())));
- __ bind(&done);
- Condition cc = masm()->CheckSmi(result);
- DeoptimizeIf(cc, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
- Register object = ToRegister(instr->value());
- __ cmpq(ToRegister(instr->map()),
- FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
- Register object = ToRegister(instr->object());
- Register index = ToRegister(instr->index());
-
- Label out_of_object, done;
- __ SmiToInteger32(index, index);
- __ cmpl(index, Immediate(0));
- __ j(less, &out_of_object);
- __ movq(object, FieldOperand(object,
- index,
- times_pointer_size,
- JSObject::kHeaderSize));
- __ jmp(&done, Label::kNear);
-
- __ bind(&out_of_object);
- __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
- __ negl(index);
- // Index now equals the out-of-object property index plus 1; the extra slot
- // is compensated for by subtracting kPointerSize from the header size below.
- __ movq(object, FieldOperand(object,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize - kPointerSize));
- __ bind(&done);
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.h b/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
deleted file mode 100644
index 66880aa..0000000
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
+++ /dev/null
@@ -1,450 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
-#define V8_X64_LITHIUM_CODEGEN_X64_H_
-
-#include "x64/lithium-x64.h"
-
-#include "checks.h"
-#include "deoptimizer.h"
-#include "safepoint-table.h"
-#include "scopes.h"
-#include "x64/lithium-gap-resolver-x64.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LDeferredCode;
-class SafepointGenerator;
-
-class LCodeGen BASE_EMBEDDED {
- public:
- LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
- : zone_(info->zone()),
- chunk_(static_cast<LPlatformChunk*>(chunk)),
- masm_(assembler),
- info_(info),
- current_block_(-1),
- current_instruction_(-1),
- instructions_(chunk->instructions()),
- deoptimizations_(4, info->zone()),
- jump_table_(4, info->zone()),
- deoptimization_literals_(8, info->zone()),
- prototype_maps_(0, info->zone()),
- inlined_function_count_(0),
- scope_(info->scope()),
- status_(UNUSED),
- translations_(info->zone()),
- deferred_(8, info->zone()),
- osr_pc_offset_(-1),
- last_lazy_deopt_pc_(0),
- frame_is_built_(false),
- safepoints_(info->zone()),
- resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple) {
- PopulateDeoptimizationLiteralsWithInlinedFunctions();
- }
-
- // Simple accessors.
- MacroAssembler* masm() const { return masm_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info_->isolate(); }
- Factory* factory() const { return isolate()->factory(); }
- Heap* heap() const { return isolate()->heap(); }
- Zone* zone() const { return zone_; }
-
- bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub();
- }
- bool NeedsDeferredFrame() const {
- return !NeedsEagerFrame() && info()->is_deferred_calling();
- }
-
- // Support for converting LOperands to assembler types.
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
- bool IsInteger32Constant(LConstantOperand* op) const;
- int ToInteger32(LConstantOperand* op) const;
- double ToDouble(LConstantOperand* op) const;
- bool IsTaggedConstant(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op) const;
-
- // Try to generate code for the entire chunk, but it may fail if the
- // chunk contains constructs we cannot handle. Returns true if the
- // code generation attempt succeeded.
- bool GenerateCode();
-
- // Finish the code by setting stack height, safepoint, and bailout
- // information on it.
- void FinishCode(Handle<Code> code);
-
- // Deferred code support.
- void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagU(LNumberTagU* instr);
- void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LStackCheck* instr);
- void DoDeferredRandom(LRandom* instr);
- void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
- void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
- void DoDeferredAllocateObject(LAllocateObject* instr);
- void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
-
- void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LInstruction* instr);
-
- // Parallel move support.
- void DoParallelMove(LParallelMove* move);
- void DoGap(LGap* instr);
-
- // Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment,
- Translation* translation,
- int* arguments_index,
- int* arguments_count);
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
- enum Status {
- UNUSED,
- GENERATING,
- DONE,
- ABORTED
- };
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_generating() const { return status_ == GENERATING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
-
- LPlatformChunk* chunk() const { return chunk_; }
- Scope* scope() const { return scope_; }
- HGraph* graph() const { return chunk_->graph(); }
-
- int GetNextEmittedBlock(int block);
-
- void EmitClassOfTest(Label* if_true,
- Label* if_false,
- Handle<String> class_name,
- Register input,
- Register temporary,
- Register scratch);
-
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return info()->num_parameters(); }
-
- void Abort(const char* reason);
- void Comment(const char* format, ...);
-
- void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
-
- // Code generation passes. Returns true if code generation should
- // continue.
- bool GeneratePrologue();
- bool GenerateBody();
- bool GenerateDeferredCode();
- bool GenerateJumpTable();
- bool GenerateSafepointTable();
-
- enum SafepointMode {
- RECORD_SIMPLE_SAFEPOINT,
- RECORD_SAFEPOINT_WITH_REGISTERS
- };
-
- void CallCodeGeneric(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr,
- SafepointMode safepoint_mode,
- int argc);
-
-
- void CallCode(Handle<Code> code,
- RelocInfo::Mode mode,
- LInstruction* instr);
-
- void CallRuntime(const Runtime::Function* function,
- int num_arguments,
- LInstruction* instr);
-
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
- LInstruction* instr) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, num_arguments, instr);
- }
-
- void CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr);
-
- enum RDIState {
- RDI_UNINITIALIZED,
- RDI_CONTAINS_TARGET
- };
-
- // Generate a direct call to a known function. Expects the function
- // to be in rdi.
- void CallKnownFunction(Handle<JSFunction> function,
- int arity,
- LInstruction* instr,
- CallKind call_kind,
- RDIState rdi_state);
-
-
- void RecordSafepointWithLazyDeopt(LInstruction* instr,
- SafepointMode safepoint_mode,
- int argc);
- void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
- Safepoint::DeoptMode mode);
- void DeoptimizeIf(Condition cc, LEnvironment* environment);
-
- void AddToTranslation(Translation* translation,
- LOperand* op,
- bool is_tagged,
- bool is_uint32,
- bool arguments_known,
- int arguments_index,
- int arguments_count);
- void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
- void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
- Register ToRegister(int index) const;
- XMMRegister ToDoubleRegister(int index) const;
- Operand BuildFastArrayOperand(
- LOperand* elements_pointer,
- LOperand* key,
- ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index = 0);
-
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
-
- // Support for recording safepoint and position information.
- void RecordSafepoint(LPointerMap* pointers,
- Safepoint::Kind kind,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
- void RecordSafepoint(Safepoint::DeoptMode mode);
- void RecordSafepointWithRegisters(LPointerMap* pointers,
- int arguments,
- Safepoint::DeoptMode mode);
- void RecordPosition(int position);
-
- static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block);
- void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitNumberUntagD(
- Register input,
- XMMRegister result,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env,
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
-
- // Emits optimized code for typeof x == "y". Modifies input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name);
-
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Label* is_not_object,
- Label* is_object);
-
- // Emits optimized code for %_IsString(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsString(Register input,
- Register temp1,
- Label* is_not_string);
-
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp);
-
- void EmitLoadFieldOrConstantFunction(Register result,
- Register object,
- Handle<Map> type,
- Handle<String> name,
- LEnvironment* env);
-
- // Emits code for pushing either a tagged constant, a (non-double)
- // register, or a stack slot operand.
- void EmitPushTaggedOperand(LOperand* operand);
-
- // Emits optimized code to deep-copy the contents of statically known
- // object graphs (e.g. object literal boilerplate).
- void EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode);
-
- struct JumpTableEntry {
- inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
- : label(),
- address(entry),
- needs_frame(frame),
- is_lazy_deopt(is_lazy) { }
- Label label;
- Address address;
- bool needs_frame;
- bool is_lazy_deopt;
- };
-
- void EnsureSpaceForLazyDeopt(int space_needed);
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
-
- Zone* zone_;
- LPlatformChunk* const chunk_;
- MacroAssembler* const masm_;
- CompilationInfo* const info_;
-
- int current_block_;
- int current_instruction_;
- const ZoneList<LInstruction*>* instructions_;
- ZoneList<LEnvironment*> deoptimizations_;
- ZoneList<JumpTableEntry> jump_table_;
- ZoneList<Handle<Object> > deoptimization_literals_;
- ZoneList<Handle<Map> > prototype_maps_;
- int inlined_function_count_;
- Scope* const scope_;
- Status status_;
- TranslationBuffer translations_;
- ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
- int last_lazy_deopt_pc_;
- bool frame_is_built_;
-
- // Builder that keeps track of safepoints in the code. The table
- // itself is emitted at the end of the generated code.
- SafepointTableBuilder safepoints_;
-
- // Compiles a set of parallel moves into a sequential list of moves.
- LGapResolver resolver_;
-
- Safepoint::Kind expected_safepoint_kind_;
-
- class PushSafepointRegistersScope BASE_EMBEDDED {
- public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- ASSERT(codegen_->info()->is_calling());
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->masm_->PushSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
- }
-
- ~PushSafepointRegistersScope() {
- ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- codegen_->masm_->PopSafepointRegisters();
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
-
- private:
- LCodeGen* codegen_;
- };
-
- friend class LDeferredCode;
- friend class LEnvironment;
- friend class SafepointGenerator;
- DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
- explicit LDeferredCode(LCodeGen* codegen)
- : codegen_(codegen),
- external_exit_(NULL),
- instruction_index_(codegen->current_instruction_) {
- codegen->AddDeferredCode(this);
- }
-
- virtual ~LDeferredCode() { }
- virtual void Generate() = 0;
- virtual LInstruction* instr() = 0;
-
- void SetExit(Label* exit) { external_exit_ = exit; }
- Label* entry() { return &entry_; }
- Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
- int instruction_index() const { return instruction_index_; }
-
- protected:
- LCodeGen* codegen() const { return codegen_; }
- MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
- LCodeGen* codegen_;
- Label entry_;
- Label exit_;
- Label* external_exit_;
- int instruction_index_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_LITHIUM_CODEGEN_X64_H_
diff --git a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc b/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc
deleted file mode 100644
index 22183a2..0000000
--- a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.cc
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "x64/lithium-gap-resolver-x64.h"
-#include "x64/lithium-codegen-x64.h"
-
-namespace v8 {
-namespace internal {
-
-LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32, owner->zone()) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
- ASSERT(moves_.is_empty());
- // Build up a worklist of moves.
- BuildInitialMoveList(parallel_move);
-
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands move = moves_[i];
- // Skip constants to perform them last. They don't block other moves
- // and skipping such moves with register destinations keeps those
- // registers free for the whole algorithm.
- if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
- PerformMove(i);
- }
- }
-
- // Perform the moves with constant sources.
- for (int i = 0; i < moves_.length(); ++i) {
- if (!moves_[i].IsEliminated()) {
- ASSERT(moves_[i].source()->IsConstantOperand());
- EmitMove(i);
- }
- }
-
- moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
- // Perform a linear sweep of the moves to add them to the initial list of
- // moves to perform, ignoring any move that is redundant (the source is
- // the same as the destination, the destination is ignored and
- // unallocated, or the move was already eliminated).
- const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
- }
- Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
- // Each call to this function performs a move and deletes it from the move
- // graph. We first recursively perform any move blocking this one. We
- // mark a move as "pending" on entry to PerformMove in order to detect
- // cycles in the move graph. We use operand swaps to resolve cycles,
- // which means that a call to PerformMove could change any source operand
- // in the move graph.
-
- ASSERT(!moves_[index].IsPending());
- ASSERT(!moves_[index].IsRedundant());
-
- // Clear this move's destination to indicate a pending move. The actual
- // destination is saved in a stack-allocated local. Recursion may allow
- // multiple moves to be pending.
- ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
- LOperand* destination = moves_[index].destination();
- moves_[index].set_destination(NULL);
-
- // Perform a depth-first traversal of the move graph to resolve
- // dependencies. Any unperformed, unpending move with a source the same
- // as this one's destination blocks this one so recursively perform all
- // such moves.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination) && !other_move.IsPending()) {
- // Though PerformMove can change any source operand in the move graph,
- // this call cannot create a blocking move via a swap (this loop does
- // not miss any). Assume there is a non-blocking move with source A
- // and this move is blocked on source B and there is a swap of A and
- // B. Then A and B must be involved in the same cycle (or they would
- // not be swapped). Since this move's destination is B and there is
- // only a single incoming edge to an operand, this move must also be
- // involved in the same cycle. In that case, the blocking move will
- // be created but will be "pending" when we return from PerformMove.
- PerformMove(i);
- }
- }
-
- // We are about to resolve this move and don't need it marked as
- // pending, so restore its destination.
- moves_[index].set_destination(destination);
-
- // This move's source may have changed due to swaps to resolve cycles and
- // so it may now be the last move in the cycle. If so remove it.
- if (moves_[index].source()->Equals(destination)) {
- moves_[index].Eliminate();
- return;
- }
-
- // The move may be blocked on at most one pending move, in which case
- // we have a cycle. Search for such a blocking move and perform a swap to
- // resolve it.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(destination)) {
- ASSERT(other_move.IsPending());
- EmitSwap(index);
- return;
- }
- }
-
- // This move is not blocked.
- EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
- // No operand should be the destination for more than one move.
- for (int i = 0; i < moves_.length(); ++i) {
- LOperand* destination = moves_[i].destination();
- for (int j = i + 1; j < moves_.length(); ++j) {
- SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
- }
- }
-#endif
-}
-
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-
-void LGapResolver::EmitMove(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- Register src = cgen_->ToRegister(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
- } else {
- ASSERT(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movq(dst, src);
- }
-
- } else if (source->IsStackSlot()) {
- Operand src = cgen_->ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- __ movq(dst, src);
- } else {
- ASSERT(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movq(kScratchRegister, src);
- __ movq(dst, kScratchRegister);
- }
-
- } else if (source->IsConstantOperand()) {
- LConstantOperand* constant_source = LConstantOperand::cast(source);
- if (destination->IsRegister()) {
- Register dst = cgen_->ToRegister(destination);
- if (cgen_->IsInteger32Constant(constant_source)) {
- __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
- } else {
- __ LoadObject(dst, cgen_->ToHandle(constant_source));
- }
- } else {
- ASSERT(destination->IsStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- if (cgen_->IsInteger32Constant(constant_source)) {
- // Zero the top 32 bits of a 64-bit spill slot that holds a 32-bit untagged
- // value.
- __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
- } else {
- __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source));
- __ movq(dst, kScratchRegister);
- }
- }
-
- } else if (source->IsDoubleRegister()) {
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- __ movaps(cgen_->ToDoubleRegister(destination), src);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- __ movsd(cgen_->ToOperand(destination), src);
- }
- } else if (source->IsDoubleStackSlot()) {
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- __ movsd(cgen_->ToDoubleRegister(destination), src);
- } else {
- ASSERT(destination->IsDoubleStackSlot());
- __ movsd(xmm0, src);
- __ movsd(cgen_->ToOperand(destination), xmm0);
- }
- } else {
- UNREACHABLE();
- }
-
- moves_[index].Eliminate();
-}
-
-
-void LGapResolver::EmitSwap(int index) {
- LOperand* source = moves_[index].source();
- LOperand* destination = moves_[index].destination();
-
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Swap two general-purpose registers.
- Register src = cgen_->ToRegister(source);
- Register dst = cgen_->ToRegister(destination);
- __ xchg(dst, src);
-
- } else if ((source->IsRegister() && destination->IsStackSlot()) ||
- (source->IsStackSlot() && destination->IsRegister())) {
- // Swap a general-purpose register and a stack slot.
- Register reg =
- cgen_->ToRegister(source->IsRegister() ? source : destination);
- Operand mem =
- cgen_->ToOperand(source->IsRegister() ? destination : source);
- __ movq(kScratchRegister, mem);
- __ movq(mem, reg);
- __ movq(reg, kScratchRegister);
-
- } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
- (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
- // Swap two stack slots or two double stack slots.
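- // Use xmm0 and the scratch register as temporaries so that no allocatable
- // register is clobbered.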
- Operand src = cgen_->ToOperand(source);
- Operand dst = cgen_->ToOperand(destination);
- __ movsd(xmm0, src);
- __ movq(kScratchRegister, dst);
- __ movsd(dst, xmm0);
- __ movq(src, kScratchRegister);
-
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
- // Swap two double registers.
- XMMRegister source_reg = cgen_->ToDoubleRegister(source);
- XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
- __ movaps(xmm0, source_reg);
- __ movaps(source_reg, destination_reg);
- __ movaps(destination_reg, xmm0);
-
- } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
- // Swap a double register and a double stack slot.
- ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
- (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
- XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
- ? source
- : destination);
- LOperand* other = source->IsDoubleRegister() ? destination : source;
- ASSERT(other->IsDoubleStackSlot());
- Operand other_operand = cgen_->ToOperand(other);
- __ movsd(xmm0, other_operand);
- __ movsd(other_operand, reg);
- __ movsd(reg, xmm0);
-
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
-
- // The swap of source and destination has executed a move from source to
- // destination.
- moves_[index].Eliminate();
-
- // Any unperformed (including pending) move with a source of either
- // this move's source or destination needs to have its source
- // changed to reflect the state of affairs after the swap.
- for (int i = 0; i < moves_.length(); ++i) {
- LMoveOperands other_move = moves_[i];
- if (other_move.Blocks(source)) {
- moves_[i].set_source(destination);
- } else if (other_move.Blocks(destination)) {
- moves_[i].set_source(source);
- }
- }
-}
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.h b/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.h
deleted file mode 100644
index d828455..0000000
--- a/src/3rdparty/v8/src/x64/lithium-gap-resolver-x64.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
-#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
-
-#include "v8.h"
-
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver BASE_EMBEDDED {
- public:
- explicit LGapResolver(LCodeGen* owner);
-
- // Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(LParallelMove* parallel_move);
-
- private:
- // Build the initial list of moves.
- void BuildInitialMoveList(LParallelMove* parallel_move);
-
- // Perform the move at the moves_ index in question (possibly requiring
- // other moves to satisfy dependencies).
- void PerformMove(int index);
-
- // Emit a move and remove it from the move graph.
- void EmitMove(int index);
-
- // Execute a move by emitting a swap of two operands. The move from
- // source to destination is removed from the move graph.
- void EmitSwap(int index);
-
- // Verify the move list before performing moves.
- void Verify();
-
- LCodeGen* cgen_;
-
- // List of moves not yet resolved.
- ZoneList<LMoveOperands> moves_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.cc b/src/3rdparty/v8/src/x64/lithium-x64.cc
deleted file mode 100644
index f591437..0000000
--- a/src/3rdparty/v8/src/x64/lithium-x64.cc
+++ /dev/null
@@ -1,2438 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "lithium-allocator-inl.h"
-#include "x64/lithium-x64.h"
-#include "x64/lithium-codegen-x64.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type) \
- void L##type::CompileToNative(LCodeGen* generator) { \
- generator->Do##type(this); \
- }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
- register_spills_[i] = NULL;
- }
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
- double_register_spills_[i] = NULL;
- }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsStackSlot());
- ASSERT(register_spills_[allocation_index] == NULL);
- register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand) {
- ASSERT(spill_operand->IsDoubleStackSlot());
- ASSERT(double_register_spills_[allocation_index] == NULL);
- double_register_spills_[allocation_index] = spill_operand;
-}
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
-  // Input operands must use a fixed register, a use-at-start policy, or
-  // a non-register policy.
- ASSERT(Output() == NULL ||
- LUnallocated::cast(Output())->HasFixedPolicy() ||
- !LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
- ASSERT(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
- }
- for (TempIterator it(this); !it.Done(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
- }
-}
-#endif
-
-
-void LInstruction::PrintTo(StringStream* stream) {
- stream->Add("%s ", this->Mnemonic());
-
- PrintOutputOperandTo(stream);
-
- PrintDataTo(stream);
-
- if (HasEnvironment()) {
- stream->Add(" ");
- environment()->PrintTo(stream);
- }
-
- if (HasPointerMap()) {
- stream->Add(" ");
- pointer_map()->PrintTo(stream);
- }
-}
-
-
-void LInstruction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- for (int i = 0; i < InputCount(); i++) {
- if (i > 0) stream->Add(" ");
- if (InputAt(i) == NULL) {
- stream->Add("NULL");
- } else {
- InputAt(i)->PrintTo(stream);
- }
- }
-}
-
-
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
- if (HasResult()) result()->PrintTo(stream);
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
- LGap::PrintDataTo(stream);
- LLabel* rep = replacement();
- if (rep != NULL) {
- stream->Add(" Dead block replaced with B%d", rep->block_id());
- }
-}
-
-
-bool LGap::IsRedundant() const {
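-  // A gap holds up to four parallel move lists (one per inner position);
-  // the gap is redundant only if every list that is present is redundant.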
- for (int i = 0; i < 4; i++) {
- if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
- return false;
- }
- }
-
- return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
- for (int i = 0; i < 4; i++) {
- stream->Add("(");
- if (parallel_moves_[i] != NULL) {
- parallel_moves_[i]->PrintDataTo(stream);
- }
- stream->Add(") ");
- }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-d";
- case Token::SUB: return "sub-d";
- case Token::MUL: return "mul-d";
- case Token::DIV: return "div-d";
- case Token::MOD: return "mod-d";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
- switch (op()) {
- case Token::ADD: return "add-t";
- case Token::SUB: return "sub-t";
- case Token::MUL: return "mul-t";
- case Token::MOD: return "mod-t";
- case Token::DIV: return "div-t";
- case Token::BIT_AND: return "bit-and-t";
- case Token::BIT_OR: return "bit-or-t";
- case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
- case Token::SHL: return "sal-t";
- case Token::SAR: return "sar-t";
- case Token::SHR: return "shr-t";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
- stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
- stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- value()->PrintTo(stream);
-}
-
-
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- left()->PrintTo(stream);
- stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if ");
- value()->PrintTo(stream);
- stream->Add(kind() == kStrictEquality ? " === " : " == ");
- stream->Add(nil() == kNullValue ? "null" : "undefined");
- stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_string(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_smi(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_undetectable(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if string_compare(");
- left()->PrintTo(stream);
- right()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_instance_type(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if class_of_test(");
- value()->PrintTo(stream);
- stream->Add(", \"%o\") then B%d else B%d",
- *hydrogen()->class_name(),
- true_block_id(),
- false_block_id());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if typeof ");
- value()->PrintTo(stream);
- stream->Add(" == \"%s\" then B%d else B%d",
- *hydrogen()->type_literal()->ToCString(),
- true_block_id(), false_block_id());
-}
-
-
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
-}
-
-
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- context()->PrintTo(stream);
- stream->Add("[%d] <- ", slot_index());
- value()->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- function()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[rcx] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
- SmartArrayPointer<char> name_string = name()->ToCString();
- stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
- stream->Add("#%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
-void LCallNewArray::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
- ASSERT(hydrogen()->property_cell()->value()->IsSmi());
- ElementsKind kind = static_cast<ElementsKind>(
- Smi::cast(hydrogen()->property_cell()->value())->value());
- stream->Add(" (%s) ", ElementsKindToString(kind));
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
- arguments()->PrintTo(stream);
-
- stream->Add(" length ");
- length()->PrintTo(stream);
-
- stream->Add(" index ");
- index()->PrintTo(stream);
-}
-
-
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
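-  // All stack slots are eight bytes wide on x64, so a double does not need
-  // a second slot and is_double can be ignored here.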
- return spill_slot_count_++;
-}
-
-
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
- // All stack slots are Double stack slots on x64.
- // Alternatively, at some point, start using half-size
- // stack slots for int32 values.
- int index = GetNextSpillIndex(is_double);
- if (is_double) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- return LStackSlot::Create(index, zone());
- }
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
- } else {
- stream->Add("]");
- }
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
- } else {
- stream->Add("] <- ");
- }
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
- stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-LPlatformChunk* LChunkBuilder::Build() {
- ASSERT(is_unused());
- chunk_ = new(zone()) LPlatformChunk(info(), graph());
- HPhase phase("L_Building chunk", chunk_);
- status_ = BUILDING;
- const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
- for (int i = 0; i < blocks->length(); i++) {
- HBasicBlock* next = NULL;
- if (i < blocks->length() - 1) next = blocks->at(i + 1);
- DoBasicBlock(blocks->at(i), next);
- if (is_aborted()) return NULL;
- }
- status_ = DONE;
- return chunk_;
-}
-
-
-void LCodeGen::Abort(const char* reason) {
- info()->set_bailout_reason(reason);
- status_ = ABORTED;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
- Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
- return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
- XMMRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
- return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
- return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
- return Use(value,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
- return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
- LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
- return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
- : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
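-  // Values that are emitted at their uses have no code of their own yet;
-  // visit the defining instruction lazily before recording the operand.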
- if (value->EmitAtUses()) {
- HInstruction* instr = HInstruction::cast(value);
- VisitInstruction(instr);
- }
- operand->set_virtual_register(value->id());
- return operand;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result) {
- result->set_virtual_register(current_instruction_->id());
- instr->set_result(result);
- return instr;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsRegister(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsSpilled(
- LTemplateInstruction<1, I, T>* instr,
- int index) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineSameAsFirst(
- LTemplateInstruction<1, I, T>* instr) {
- return Define(instr,
- new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixedDouble(
- LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg) {
- return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
- HEnvironment* hydrogen_env = current_block_->last_environment();
- int argument_index_accumulator = 0;
- instr->set_environment(CreateEnvironment(hydrogen_env,
- &argument_index_accumulator));
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize) {
- info()->MarkAsNonDeferredCalling();
-
-#ifdef DEBUG
- instr->VerifyCall();
-#endif
- instr->MarkAsCall();
- instr = AssignPointerMap(instr);
-
- if (hinstr->HasObservableSideEffects()) {
- ASSERT(hinstr->next()->IsSimulate());
- HSimulate* sim = HSimulate::cast(hinstr->next());
- ASSERT(instruction_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_.IsNone());
- instruction_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = sim->ast_id();
- }
-
-  // If the instruction does not have observable side effects, lazy
-  // deoptimization after the call will try to deoptimize to the point
-  // before the call. Thus we still need to attach an environment to this
-  // call even if the call sequence cannot deoptimize eagerly.
- bool needs_environment =
- (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
- !hinstr->HasObservableSideEffects();
- if (needs_environment && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
-
- return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
- return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
- LUnallocated* operand =
- new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- operand->set_virtual_register(allocator_->GetVirtualRegister());
- if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
- LUnallocated* operand = ToUnallocated(reg);
- ASSERT(operand->HasFixedPolicy());
- return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
- return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
- return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
- return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
- HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
-
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
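-    // Both the JS semantics and the x64 shift instructions use only the low
-    // five bits of the shift count for 32-bit values, so mask the constant.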
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseFixed(right_value, rcx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
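-    // A logical shift right by zero produces an unsigned result; if the top
-    // bit of the value is set, that result does not fit into a signed int32
-    // and the instruction may need to deoptimize unless its uses truncate.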
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
- does_deopt = true;
- break;
- }
- }
- }
- }
-
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
- HValue* left = instr->left();
- HValue* right = instr->right();
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- LOperand* left_operand = UseFixed(left, rdx);
- LOperand* right_operand = UseFixed(right, rax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(op, left_operand, right_operand);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
- ASSERT(is_building());
- current_block_ = block;
- next_block_ = next_block;
- if (block->IsStartBlock()) {
- block->UpdateEnvironment(graph_->start_environment());
- argument_count_ = 0;
- } else if (block->predecessors()->length() == 1) {
- // We have a single predecessor => copy environment and outgoing
- // argument count from the predecessor.
- ASSERT(block->phis()->length() == 0);
- HBasicBlock* pred = block->predecessors()->at(0);
- HEnvironment* last_environment = pred->last_environment();
- ASSERT(last_environment != NULL);
-    // Only copy the environment if it is used again later.
- if (pred->end()->SecondSuccessor() == NULL) {
- ASSERT(pred->end()->FirstSuccessor() == block);
- } else {
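-      // Blocks are processed in increasing block id order, so a successor
-      // with a higher id than this block has not been visited yet and will
-      // still need the predecessor's environment; work on a copy here.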
- if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
- pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
- last_environment = last_environment->Copy();
- }
- }
- block->UpdateEnvironment(last_environment);
- ASSERT(pred->argument_count() >= 0);
- argument_count_ = pred->argument_count();
- } else {
- // We are at a state join => process phis.
- HBasicBlock* pred = block->predecessors()->at(0);
-    // No need to copy the environment; it cannot be used later.
- HEnvironment* last_environment = pred->last_environment();
- for (int i = 0; i < block->phis()->length(); ++i) {
- HPhi* phi = block->phis()->at(i);
- last_environment->SetValueAt(phi->merged_index(), phi);
- }
- for (int i = 0; i < block->deleted_phis()->length(); ++i) {
- last_environment->SetValueAt(block->deleted_phis()->at(i),
- graph_->GetConstantUndefined());
- }
- block->UpdateEnvironment(last_environment);
- // Pick up the outgoing argument count of one of the predecessors.
- argument_count_ = pred->argument_count();
- }
- HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
- while (current != NULL && !is_aborted()) {
- // Code for constants in registers is generated lazily.
- if (!current->EmitAtUses()) {
- VisitInstruction(current);
- }
- current = current->next();
- }
- int end = chunk_->instructions()->length() - 1;
- if (end >= start) {
- block->set_first_instruction_index(start);
- block->set_last_instruction_index(end);
- }
- block->set_argument_count(argument_count_);
- next_block_ = NULL;
- current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
- HInstruction* old_current = current_instruction_;
- current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
- LInstruction* instr = current->CompileToLithium(this);
-
- if (instr != NULL) {
- if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
- instr = AssignPointerMap(instr);
- }
- if (FLAG_stress_environments && !instr->HasEnvironment()) {
- instr = AssignEnvironment(instr);
- }
- instr->set_hydrogen_value(current);
- chunk_->AddInstruction(instr, current_block_);
- }
- current_instruction_ = old_current;
-}
-
-
-LEnvironment* LChunkBuilder::CreateEnvironment(
- HEnvironment* hydrogen_env,
- int* argument_index_accumulator) {
- if (hydrogen_env == NULL) return NULL;
-
- LEnvironment* outer =
- CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
- BailoutId ast_id = hydrogen_env->ast_id();
- ASSERT(!ast_id.IsNone() ||
- hydrogen_env->frame_type() != JS_FUNCTION);
- int value_count = hydrogen_env->length();
- LEnvironment* result = new(zone()) LEnvironment(
- hydrogen_env->closure(),
- hydrogen_env->frame_type(),
- ast_id,
- hydrogen_env->parameter_count(),
- argument_count_,
- value_count,
- outer,
- hydrogen_env->entry(),
- zone());
- int argument_index = *argument_index_accumulator;
- for (int i = 0; i < value_count; ++i) {
- if (hydrogen_env->is_special_index(i)) continue;
-
- HValue* value = hydrogen_env->values()->at(i);
- LOperand* op = NULL;
- if (value->IsArgumentsObject()) {
- op = NULL;
- } else if (value->IsPushArgument()) {
- op = new(zone()) LArgument(argument_index++);
- } else {
- op = UseAny(value);
- }
- result->AddValue(op,
- value->representation(),
- value->CheckFlag(HInstruction::kUint32));
- }
-
- if (hydrogen_env->frame_type() == JS_FUNCTION) {
- *argument_index_accumulator = argument_index;
- }
-
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* value = instr->value();
- if (value->EmitAtUses()) {
- ASSERT(value->IsConstant());
- ASSERT(!value->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
- ? instr->FirstSuccessor()
- : instr->SecondSuccessor();
- return new(zone()) LGoto(successor->block_id());
- }
-
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment.
- Representation rep = value->representation();
- HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
- return AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpMapAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left = UseFixed(instr->left(), rax);
- LOperand* right = UseFixed(instr->right(), rdx);
- LInstanceOf* result = new(zone()) LInstanceOf(left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), rax),
- FixedTemp(rdi));
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
- LOperand* receiver = UseRegister(instr->receiver());
- LOperand* function = UseRegisterAtStart(instr->function());
- LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
- return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- LOperand* function = UseFixed(instr->function(), rdi);
- LOperand* receiver = UseFixed(instr->receiver(), rax);
- LOperand* length = UseFixed(instr->length(), rbx);
- LOperand* elements = UseFixed(instr->elements(), rcx);
- LApplyArguments* result = new(zone()) LApplyArguments(function,
- receiver,
- length,
- elements);
- return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
- ++argument_count_;
- LOperand* argument = UseOrConstant(instr->argument());
- return new(zone()) LPushArgument(argument);
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
- return instr->HasNoUses()
- ? NULL
- : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- // If there is a non-return use, the context must be allocated in a register.
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
- if (!it.value()->IsReturn()) {
- return DefineAsRegister(new(zone()) LContext);
- }
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
- return MarkAsCall(new(zone()) LDeclareGlobals, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- return DefineAsRegister(new(zone()) LGlobalObject(instr->qml_global()));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
- LOperand* global_object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
-}
-
-
-LInstruction* LChunkBuilder::DoCallConstantFunction(
- HCallConstantFunction* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* function = UseFixed(instr->function(), rdi);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new(zone()) LInvokeFunction(function);
- return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* value = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
- return DefineAsRegister(result);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- case kMathPowHalf:
- return DefineSameAsFirst(result);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- ASSERT(instr->key()->representation().IsTagged());
- LOperand* key = UseFixed(instr->key(), rcx);
- argument_count_ -= instr->argument_count();
- LCallKeyed* result = new(zone()) LCallKeyed(key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallNamed, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
- argument_count_ -= instr->argument_count();
- LCallGlobal* result = new(zone()) LCallGlobal(instr->qml_global());
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* constructor = UseFixed(instr->constructor(), rdi);
- argument_count_ -= instr->argument_count();
- LCallNew* result = new(zone()) LCallNew(constructor);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
- ASSERT(FLAG_optimize_constructed_arrays);
- LOperand* constructor = UseFixed(instr->constructor(), rdi);
- argument_count_ -= instr->argument_count();
- LCallNewArray* result = new(zone()) LCallNewArray(constructor);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- LOperand* function = UseFixed(instr->function(), rdi);
- argument_count_ -= instr->argument_count();
- LCallFunction* result = new(zone()) LCallFunction(function);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
- return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
- return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
- return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new(zone()) LBitI(left, right));
- } else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
- ASSERT(instr->value()->representation().IsInteger32());
- ASSERT(instr->representation().IsInteger32());
- if (instr->HasNoUses()) return NULL;
- LOperand* input = UseRegisterAtStart(instr->value());
- LBitNotI* result = new(zone()) LBitNotI(input);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsInteger32()) {
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
- }
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, rax));
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::DIV, instr);
- }
-}
-
-
-HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
- // A value with an integer representation does not need to be transformed.
- if (dividend->representation().IsInteger32()) {
- return dividend;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (dividend->IsChange() &&
- HChange::cast(dividend)->from().IsInteger32()) {
- return HChange::cast(dividend)->value();
- }
- return NULL;
-}
-
-
-HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
- HConstant* constant_val = HConstant::cast(divisor);
- return constant_val->CopyToRepresentation(Representation::Integer32(),
- divisor->block()->zone());
- }
- // A value with an integer representation does not need to be transformed.
- if (divisor->representation().IsInteger32()) {
- return divisor;
- // A change from an integer32 can be replaced by the integer32 value.
- } else if (divisor->IsChange() &&
- HChange::cast(divisor)->from().IsInteger32()) {
- return HChange::cast(divisor)->value();
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- if (!right->IsConstant()) {
- ASSERT(right->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(flooring_div, rax));
- }
-
- ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
- LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
- int32_t divisor_si = HConstant::cast(right)->Integer32Value();
- if (divisor_si == 0) {
- LOperand* dividend = UseRegister(instr->left());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
- } else if (IsPowerOf2(abs(divisor_si))) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
- } else {
-    // General constant divisor: use an additional 64-bit temp register.
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LOperand* temp = TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, temp));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- LInstruction* result;
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LModI* mod =
- new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
- result = DefineSameAsFirst(mod);
- } else {
-      // The temporary operand is necessary to ensure that right is not
-      // allocated into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* value = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LModI* mod = new(zone()) LModI(value, divisor, temp);
- result = DefineFixed(mod, rdx);
- }
-
- return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
- instr->CheckFlag(HValue::kCanBeDivByZero))
- ? AssignEnvironment(result)
- : result;
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
- } else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC.
-    // We need to use a fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = UseFixedDouble(instr->right(), xmm1);
- LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstant(instr->MostConstantOperand());
- LMulI* mul = new(zone()) LMulI(left, right);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
- AssignEnvironment(mul);
- }
- return DefineSameAsFirst(mul);
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::MUL, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- LSubI* sub = new(zone()) LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::SUB, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::SUB, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- LAddI* add = new(zone()) LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
- } else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::ADD, instr);
- } else {
- ASSERT(instr->representation().IsTagged());
- return DoArithmeticT(Token::ADD, instr);
- }
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
- LOperand* left = NULL;
- LOperand* right = NULL;
- if (instr->representation().IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- left = UseRegisterAtStart(instr->LeastConstantOperand());
- right = UseOrConstantAtStart(instr->MostConstantOperand());
- } else {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
- return DefineSameAsFirst(minmax);
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double power. It can't trigger a GC.
-  // We need to use a fixed result register for the call.
- Representation exponent_type = instr->right()->representation();
- ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), xmm2);
- LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), xmm1) :
-#ifdef _WIN64
- UseFixed(instr->right(), rdx);
-#else
- UseFixed(instr->right(), rdi);
-#endif
- LPower* result = new(zone()) LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
- CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->global_object()->representation().IsTagged());
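-  // The global object is passed in the first integer argument register of
-  // the native C calling convention: rcx on Win64, rdi elsewhere.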
-#ifdef _WIN64
- LOperand* global_object = UseFixed(instr->global_object(), rcx);
-#else
- LOperand* global_object = UseFixed(instr->global_object(), rdi);
-#endif
- LRandom* result = new(zone()) LRandom(global_object);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LCmpT* result = new(zone()) LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
- HCompareIDAndBranch* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return new(zone()) LCmpIDAndBranch(left, right);
- } else {
- ASSERT(r.IsDouble());
- ASSERT(instr->left()->representation().IsDouble());
- ASSERT(instr->right()->representation().IsDouble());
- LOperand* left;
- LOperand* right;
- if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
- left = UseRegisterOrConstantAtStart(instr->left());
- right = UseRegisterOrConstantAtStart(instr->right());
- } else {
- left = UseRegisterAtStart(instr->left());
- right = UseRegisterAtStart(instr->right());
- }
- return new(zone()) LCmpIDAndBranch(left, right);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
- HCompareObjectEqAndBranch* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
- HCompareConstantEqAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LCmpConstantEqAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
- return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsStringAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
- HIsUndetectableAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsUndetectableAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
- HStringCompareAndBranch* instr) {
-
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LStringCompareAndBranch* result =
- new(zone()) LStringCompareAndBranch(left, right);
-
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
- HHasInstanceTypeAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasInstanceTypeAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasCachedArrayIndexAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
- HClassOfTestAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- return new(zone()) LClassOfTestAndBranch(value,
- TempRegister(),
- TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
- HFixedArrayBaseLength* instr) {
- LOperand* array = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
- LOperand* object = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- LOperand* object = UseRegister(instr->value());
- LValueOf* result = new(zone()) LValueOf(object);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), rax);
- LDateField* result = new(zone()) LDateField(object, instr->index());
- return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- ASSERT(rcx.is_byte_register());
- LOperand* value = UseFixed(instr->value(), rcx);
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
- HInductionVariableAnnotation* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
- LOperand* value = UseRegisterOrConstantAtStart(instr->index());
- LOperand* length = Use(instr->length());
- return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
- // The control instruction marking the end of a block that completed
- // abruptly (e.g., threw an exception). There is nothing specific to do.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
- LOperand* value = UseFixed(instr->value(), rax);
- return MarkAsCall(new(zone()) LThrow(value), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
- // All HForceRepresentation instructions should be eliminated in the
- // representation change phase of Hydrogen.
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Representation from = instr->from();
- Representation to = instr->to();
-  // Only mark conversions that might need to allocate as calling rather than
-  // all changes. This way, simple non-allocating conversions do not have to
-  // force the building of a stack frame.
- if (from.IsTagged()) {
- if (to.IsDouble()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- if (instr->value()->type().IsSmi()) {
- return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
- } else {
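-        // A non-truncating tagged-to-int32 conversion needs a double scratch
-        // register to check that the heap number value converts without
-        // loss; a truncating conversion can simply drop the fractional bits.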
- bool truncating = instr->CanTruncateToInt32();
- LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
- }
- }
- } else if (from.IsDouble()) {
- if (to.IsTagged()) {
- info()->MarkAsDeferredCalling();
- LOperand* value = UseRegister(instr->value());
- LOperand* temp = TempRegister();
-
- // Make sure that temp and result_temp are different registers.
- LUnallocated* result_temp = TempRegister();
- LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
- return AssignPointerMap(Define(result, result_temp));
- } else {
- ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
- }
- } else if (from.IsInteger32()) {
- info()->MarkAsDeferredCalling();
- if (to.IsTagged()) {
- HValue* val = instr->value();
- LOperand* value = UseRegister(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new(zone()) LSmiTag(value));
- } else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- }
- } else {
- if (instr->value()->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- return DefineAsRegister(
- new(zone()) LUint32ToDouble(UseRegister(instr->value()), temp));
- } else {
- ASSERT(to.IsDouble());
- LOperand* value = Use(instr->value());
- return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
- }
- }
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckNonSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- return AssignEnvironment(Define(result, temp));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return AssignEnvironment(new(zone()) LCheckFunction(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LCheckMaps* result = new(zone()) LCheckMaps(value);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
- HValue* value = instr->value();
- Representation input_rep = value->representation();
- LOperand* reg = UseRegister(value);
- if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg));
- } else if (input_rep.IsInteger32()) {
- return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
- } else {
- ASSERT(input_rep.IsTagged());
-    // The register allocator doesn't (yet) support allocation of double
-    // temps, so reserve xmm1 explicitly.
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
- FixedTemp(xmm1));
- return AssignEnvironment(DefineSameAsFirst(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new(zone()) LReturn(UseFixed(instr->value(), rax));
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
- Representation r = instr->representation();
- if (r.IsInteger32()) {
- return DefineAsRegister(new(zone()) LConstantI);
- } else if (r.IsDouble()) {
- LOperand* temp = TempRegister();
- return DefineAsRegister(new(zone()) LConstantD(temp));
- } else if (r.IsTagged()) {
- return DefineAsRegister(new(zone()) LConstantT);
- } else {
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), rax);
- LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* value = UseRegister(instr->value());
- // Use a temp to avoid reloading the cell value address in the case where
- // we perform a hole check.
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
- : new(zone()) LStoreGlobalCell(value, NULL);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
- LOperand* global_object = UseFixed(instr->global_object(), rdx);
- LOperand* value = UseFixed(instr->value(), rax);
- LStoreGlobalGeneric* result = new(zone()) LStoreGlobalGeneric(global_object,
- value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- LOperand* context = UseRegisterAtStart(instr->value());
- LInstruction* result =
- DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- LOperand* context;
- LOperand* value;
- LOperand* temp;
- if (instr->NeedsWriteBarrier()) {
- context = UseTempRegister(instr->context());
- value = UseTempRegister(instr->value());
- temp = TempRegister();
- } else {
- context = UseRegister(instr->context());
- value = UseRegister(instr->value());
- temp = NULL;
- }
- LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- ASSERT(instr->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(obj));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
- HLoadNamedFieldPolymorphic* instr) {
- ASSERT(instr->representation().IsTagged());
- if (instr->need_generic()) {
- LOperand* obj = UseFixed(instr->object(), rax);
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return MarkAsCall(DefineFixed(result, rax), instr);
- } else {
- LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result =
- new(zone()) LLoadNamedFieldPolymorphic(obj);
- return AssignEnvironment(DefineAsRegister(result));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), rax);
- LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
- HLoadFunctionPrototype* instr) {
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
- HLoadExternalArrayPointer* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
-
- if (!instr->is_external()) {
- LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
- } else {
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
- }
-
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- // An unsigned int array load might overflow and cause a deopt; make sure it
- // has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), rdx);
- LOperand* key = UseFixed(instr->key(), rax);
-
- LLoadKeyedGeneric* result = new(zone()) LLoadKeyedGeneric(object, key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = instr->key()->representation().IsTagged();
-
- if (!instr->is_external()) {
- ASSERT(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* key = NULL;
- LOperand* val = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
- key = clobbers_key ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- } else {
- ASSERT(instr->value()->representation().IsTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = (clobbers_key || needs_write_barrier)
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- }
-
- return new(zone()) LStoreKeyed(object, key, val);
- }
-
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LOperand* key = clobbers_key ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), rdx);
- LOperand* key = UseFixed(instr->key(), rcx);
- LOperand* value = UseFixed(instr->value(), rax);
-
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsTagged());
- ASSERT(instr->value()->representation().IsTagged());
-
- LStoreKeyedGeneric* result =
- new(zone()) LStoreKeyedGeneric(object, key, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
- HTransitionElementsKind* instr) {
- LOperand* object = UseRegister(instr->object());
- if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
- LOperand* object = UseRegister(instr->object());
- LOperand* new_map_reg = TempRegister();
- LOperand* temp_reg = TempRegister();
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
- return result;
- } else if (FLAG_compiled_transitions) {
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, NULL, NULL);
- return AssignPointerMap(result);
- } else {
- LOperand* object = UseFixed(instr->object(), rax);
- LOperand* fixed_object_reg = FixedTemp(rdx);
- LOperand* new_map_reg = FixedTemp(rbx);
- LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object,
- new_map_reg,
- fixed_object_reg);
- return MarkAsCall(result, instr);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoTrapAllocationMemento(
- HTrapAllocationMemento* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* temp = TempRegister();
- LTrapAllocationMemento* result =
- new(zone()) LTrapAllocationMemento(object, temp);
- return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool needs_write_barrier_for_map = !instr->transition().is_null() &&
- instr->NeedsWriteBarrierForMap();
-
- LOperand* obj;
- if (needs_write_barrier) {
- obj = instr->is_in_object()
- ? UseRegister(instr->object())
- : UseTempRegister(instr->object());
- } else {
- obj = needs_write_barrier_for_map
- ? UseRegister(instr->object())
- : UseRegisterAtStart(instr->object());
- }
-
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
-
- // We only need a scratch register if we have a write barrier or we
- // have a store into the properties array (not an in-object property).
- LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
- needs_write_barrier_for_map) ? TempRegister() : NULL;
-
- return new(zone()) LStoreNamedField(obj, val, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- LOperand* object = UseFixed(instr->object(), rdx);
- LOperand* value = UseFixed(instr->value(), rax);
-
- LStoreNamedGeneric* result = new(zone()) LStoreNamedGeneric(object, value);
- return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* left = UseOrConstantAtStart(instr->left());
- LOperand* right = UseOrConstantAtStart(instr->right());
- return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), rax),
- instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- LOperand* string = UseTempRegister(instr->string());
- LOperand* index = UseTempRegister(instr->index());
- LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
- LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- LOperand* string = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- info()->MarkAsDeferredCalling();
- LAllocateObject* result = new(zone()) LAllocateObject(TempRegister());
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* size = UseTempRegister(instr->size());
- LOperand* temp = TempRegister();
- LAllocate* result = new(zone()) LAllocate(size, temp);
- return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LOperand* object = UseAtStart(instr->object());
- LOperand* key = UseOrConstantAtStart(instr->key());
- LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- ASSERT(argument_count_ == 0);
- allocator_->MarkAsOsrEntry();
- current_block_->last_environment()->set_ast_id(instr->ast_id());
- return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- LParameter* result = new(zone()) LParameter;
- if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(result, spill_index);
- } else {
- ASSERT(info()->IsStub());
- CodeStubInterfaceDescriptor* descriptor =
- info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
- Register reg = descriptor->register_params_[instr->index()];
- return DefineFixed(result, reg);
- }
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
- if (spill_index > LUnallocated::kMaxFixedIndex) {
- Abort("Too many spill slots needed for OSR");
- spill_index = 0;
- }
- return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallStub, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- // There are no real uses of the arguments object.
- // arguments.length and element access are supported directly on
- // stack arguments, and any real arguments object use causes a bailout.
- // So this value is never used.
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* args = UseRegister(instr->arguments());
- LOperand* length = UseTempRegister(instr->length());
- LOperand* index = Use(instr->index());
- return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
- LOperand* object = UseFixed(instr->value(), rax);
- LToFastProperties* result = new(zone()) LToFastProperties(object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new(zone()) LTypeof(UseAtStart(instr->value()));
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
- return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
- HEnvironment* env = current_block_->last_environment();
- ASSERT(env != NULL);
-
- env->set_ast_id(instr->ast_id());
-
- env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
- HValue* value = instr->values()->at(i);
- if (instr->HasAssignedIndexAt(i)) {
- env->Bind(instr->GetAssignedIndexAt(i), value);
- } else {
- env->Push(value);
- }
- }
-
- // If there is an instruction pending deoptimization environment, create a
- // lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
- LInstruction* result = AssignEnvironment(lazy_bailout);
- // Store the lazy deopt environment with the instruction if needed. Right
- // now it is only used for LInstanceOfKnownGlobal.
- instruction_pending_deoptimization_environment_->
- SetDeferredLazyDeoptimizationEnvironment(result->environment());
- instruction_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = BailoutId::None();
- return result;
- }
-
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- info()->MarkAsDeferredCalling();
- if (instr->is_function_entry()) {
- return MarkAsCall(new(zone()) LStackCheck, instr);
- } else {
- ASSERT(instr->is_backwards_branch());
- return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
- }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
- HEnvironment* outer = current_block_->last_environment();
- HConstant* undefined = graph()->GetConstantUndefined();
- HEnvironment* inner = outer->CopyForInlining(instr->closure(),
- instr->arguments_count(),
- instr->function(),
- undefined,
- instr->inlining_kind(),
- instr->undefined_receiver());
- if (instr->arguments_var() != NULL) {
- inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
- }
- inner->set_entry(instr);
- current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedClosure(instr->closure());
- return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- LInstruction* pop = NULL;
-
- HEnvironment* env = current_block_->last_environment();
-
- if (env->entry()->arguments_pushed()) {
- int argument_count = env->arguments_environment()->parameter_count();
- pop = new(zone()) LDrop(argument_count);
- argument_count_ -= argument_count;
- }
-
- HEnvironment* outer = current_block_->last_environment()->
- DiscardInlined(false);
- current_block_->UpdateEnvironment(outer);
-
- return pop;
-}
-
-
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
- LOperand* key = UseOrConstantAtStart(instr->key());
- LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new(zone()) LIn(key, object);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
- LOperand* object = UseFixed(instr->enumerable(), rax);
- LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
- return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
- LOperand* map = UseRegister(instr->map());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* map = UseRegisterAtStart(instr->map());
- return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
- LOperand* object = UseRegister(instr->object());
- LOperand* index = UseTempRegister(instr->index());
- return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.h b/src/3rdparty/v8/src/x64/lithium-x64.h
deleted file mode 100644
index 0133578..0000000
--- a/src/3rdparty/v8/src/x64/lithium-x64.h
+++ /dev/null
@@ -1,2641 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_LITHIUM_X64_H_
-#define V8_X64_LITHIUM_X64_H_
-
-#include "hydrogen.h"
-#include "lithium-allocator.h"
-#include "lithium.h"
-#include "safepoint-table.h"
-#include "utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class LCodeGen;
-
-#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(ControlInstruction) \
- V(Call) \
- LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
- V(AccessArgumentsAt) \
- V(AddI) \
- V(Allocate) \
- V(AllocateObject) \
- V(ApplyArguments) \
- V(ArgumentsElements) \
- V(ArgumentsLength) \
- V(ArithmeticD) \
- V(ArithmeticT) \
- V(ArrayLiteral) \
- V(BitI) \
- V(BitNotI) \
- V(BoundsCheck) \
- V(Branch) \
- V(CallConstantFunction) \
- V(CallFunction) \
- V(CallGlobal) \
- V(CallKeyed) \
- V(CallKnownGlobal) \
- V(CallNamed) \
- V(CallNew) \
- V(CallNewArray) \
- V(CallRuntime) \
- V(CallStub) \
- V(CheckFunction) \
- V(CheckInstanceType) \
- V(CheckMaps) \
- V(CheckNonSmi) \
- V(CheckPrototypeMaps) \
- V(CheckSmi) \
- V(ClampDToUint8) \
- V(ClampIToUint8) \
- V(ClampTToUint8) \
- V(ClassOfTestAndBranch) \
- V(CmpConstantEqAndBranch) \
- V(CmpIDAndBranch) \
- V(CmpObjectEqAndBranch) \
- V(CmpMapAndBranch) \
- V(CmpT) \
- V(ConstantD) \
- V(ConstantI) \
- V(ConstantT) \
- V(Context) \
- V(DeclareGlobals) \
- V(DeleteProperty) \
- V(Deoptimize) \
- V(DivI) \
- V(DoubleToI) \
- V(DummyUse) \
- V(ElementsKind) \
- V(FastLiteral) \
- V(FixedArrayBaseLength) \
- V(MapEnumLength) \
- V(FunctionLiteral) \
- V(GetCachedArrayIndex) \
- V(GlobalObject) \
- V(GlobalReceiver) \
- V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceTypeAndBranch) \
- V(In) \
- V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
- V(InstanceSize) \
- V(InstructionGap) \
- V(Integer32ToDouble) \
- V(Uint32ToDouble) \
- V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
- V(IsNilAndBranch) \
- V(IsObjectAndBranch) \
- V(IsStringAndBranch) \
- V(IsSmiAndBranch) \
- V(IsUndetectableAndBranch) \
- V(JSArrayLength) \
- V(Label) \
- V(LazyBailout) \
- V(LoadContextSlot) \
- V(LoadElements) \
- V(LoadExternalArrayPointer) \
- V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
- V(LoadGlobalGeneric) \
- V(LoadKeyed) \
- V(LoadKeyedGeneric) \
- V(LoadNamedField) \
- V(LoadNamedFieldPolymorphic) \
- V(LoadNamedGeneric) \
- V(MathExp) \
- V(MathFloorOfDiv) \
- V(MathMinMax) \
- V(ModI) \
- V(MulI) \
- V(NumberTagD) \
- V(NumberTagI) \
- V(NumberTagU) \
- V(NumberUntagD) \
- V(ObjectLiteral) \
- V(OsrEntry) \
- V(OuterContext) \
- V(Parameter) \
- V(Power) \
- V(PushArgument) \
- V(Random) \
- V(RegExpLiteral) \
- V(Return) \
- V(SeqStringSetChar) \
- V(ShiftI) \
- V(SmiTag) \
- V(SmiUntag) \
- V(StackCheck) \
- V(StoreContextSlot) \
- V(StoreGlobalCell) \
- V(StoreGlobalGeneric) \
- V(StoreKeyed) \
- V(StoreKeyedGeneric) \
- V(StoreNamedField) \
- V(StoreNamedGeneric) \
- V(StringAdd) \
- V(StringCharCodeAt) \
- V(StringCharFromCode) \
- V(StringCompareAndBranch) \
- V(StringLength) \
- V(SubI) \
- V(TaggedToI) \
- V(ThisFunction) \
- V(Throw) \
- V(ToFastProperties) \
- V(TransitionElementsKind) \
- V(TrapAllocationMemento) \
- V(Typeof) \
- V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
- V(UnknownOSRValue) \
- V(ValueOf) \
- V(ForInPrepareMap) \
- V(ForInCacheArray) \
- V(CheckMapValue) \
- V(LoadFieldByIndex) \
- V(DateField) \
- V(WrapReceiver) \
- V(Drop)
-
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
- virtual Opcode opcode() const { return LInstruction::k##type; } \
- virtual void CompileToNative(LCodeGen* generator); \
- virtual const char* Mnemonic() const { return mnemonic; } \
- static L##type* cast(LInstruction* instr) { \
- ASSERT(instr->Is##type()); \
- return reinterpret_cast<L##type*>(instr); \
- }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type) \
- H##type* hydrogen() const { \
- return H##type::cast(hydrogen_value()); \
- }
-
-
-class LInstruction: public ZoneObject {
- public:
- LInstruction()
- : environment_(NULL),
- hydrogen_value_(NULL),
- is_call_(false) { }
-
- virtual ~LInstruction() { }
-
- virtual void CompileToNative(LCodeGen* generator) = 0;
- virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream);
- virtual void PrintDataTo(StringStream* stream);
- virtual void PrintOutputOperandTo(StringStream* stream);
-
- enum Opcode {
- // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
- kNumberOfInstructions
-#undef DECLARE_OPCODE
- };
-
- virtual Opcode opcode() const = 0;
-
- // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
- bool Is##type() const { return opcode() == k##type; }
- LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
- // Declare virtual predicates for instructions that don't have
- // an opcode.
- virtual bool IsGap() const { return false; }
-
- virtual bool IsControl() const { return false; }
-
- void set_environment(LEnvironment* env) { environment_ = env; }
- LEnvironment* environment() const { return environment_; }
- bool HasEnvironment() const { return environment_ != NULL; }
-
- void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
- LPointerMap* pointer_map() const { return pointer_map_.get(); }
- bool HasPointerMap() const { return pointer_map_.is_set(); }
-
- void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
- HValue* hydrogen_value() const { return hydrogen_value_; }
-
- void MarkAsCall() { is_call_ = true; }
-
- // Interface to the register allocator and iterators.
- bool ClobbersTemps() const { return is_call_; }
- bool ClobbersRegisters() const { return is_call_; }
- bool ClobbersDoubleRegisters() const { return is_call_; }
-
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
- // Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
-
- virtual bool HasResult() const = 0;
- virtual LOperand* result() = 0;
-
- LOperand* FirstInput() { return InputAt(0); }
- LOperand* Output() { return HasResult() ? result() : NULL; }
-
-#ifdef DEBUG
- void VerifyCall();
-#endif
-
- private:
- // Iterator support.
- friend class InputIterator;
- virtual int InputCount() = 0;
- virtual LOperand* InputAt(int i) = 0;
-
- friend class TempIterator;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
- LEnvironment* environment_;
- SetOncePointer<LPointerMap> pointer_map_;
- HValue* hydrogen_value_;
- bool is_call_;
-};
-
-
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
- public:
- // Allow 0 or 1 output operands.
- STATIC_ASSERT(R == 0 || R == 1);
- virtual bool HasResult() const { return R != 0; }
- void set_result(LOperand* operand) { results_[0] = operand; }
- LOperand* result() { return results_[0]; }
-
- protected:
- EmbeddedContainer<LOperand*, R> results_;
- EmbeddedContainer<LOperand*, I> inputs_;
- EmbeddedContainer<LOperand*, T> temps_;
-
- private:
- // Iterator support.
- virtual int InputCount() { return I; }
- virtual LOperand* InputAt(int i) { return inputs_[i]; }
-
- virtual int TempCount() { return T; }
- virtual LOperand* TempAt(int i) { return temps_[i]; }
-};
-
-
-class LGap: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGap(HBasicBlock* block)
- : block_(block) {
- parallel_moves_[BEFORE] = NULL;
- parallel_moves_[START] = NULL;
- parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
- }
-
- // Can't use the DECLARE-macro here because of sub-classes.
- virtual bool IsGap() const { return true; }
- virtual void PrintDataTo(StringStream* stream);
- static LGap* cast(LInstruction* instr) {
- ASSERT(instr->IsGap());
- return reinterpret_cast<LGap*>(instr);
- }
-
- bool IsRedundant() const;
-
- HBasicBlock* block() const { return block_; }
-
- enum InnerPosition {
- BEFORE,
- START,
- END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
- };
-
- LParallelMove* GetOrCreateParallelMove(InnerPosition pos,
- Zone* zone) {
- if (parallel_moves_[pos] == NULL) {
- parallel_moves_[pos] = new(zone) LParallelMove(zone);
- }
- return parallel_moves_[pos];
- }
-
- LParallelMove* GetParallelMove(InnerPosition pos) {
- return parallel_moves_[pos];
- }
-
- private:
- LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
- HBasicBlock* block_;
-};
-
-
-class LInstructionGap: public LGap {
- public:
- explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
- DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LGoto(int block_id) : block_id_(block_id) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream);
- virtual bool IsControl() const { return true; }
-
- int block_id() const { return block_id_; }
-
- private:
- int block_id_;
-};
-
-
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
- public:
- LLazyBailout() : gap_instructions_size_(0) { }
-
- DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
- void set_gap_instructions_size(int gap_instructions_size) {
- gap_instructions_size_ = gap_instructions_size;
- }
- int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
- int gap_instructions_size_;
-};
-
-
-class LDummyUse: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDummyUse(LOperand* value) {
- inputs_[0] = value;
- }
- DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
-};
-
-
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
-};
-
-
-class LLabel: public LGap {
- public:
- explicit LLabel(HBasicBlock* block)
- : LGap(block), replacement_(NULL) { }
-
- DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
- virtual void PrintDataTo(StringStream* stream);
-
- int block_id() const { return block()->block_id(); }
- bool is_loop_header() const { return block()->IsLoopHeader(); }
- Label* label() { return &label_; }
- LLabel* replacement() const { return replacement_; }
- void set_replacement(LLabel* label) { replacement_ = label; }
- bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
- Label label_;
- LLabel* replacement_;
-};
-
-
-class LParameter: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
-};
-
-
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
- TranscendentalCache::Type transcendental_type() {
- return hydrogen()->transcendental_type();
- }
-};
-
-
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
- virtual bool IsControl() const { return true; }
-
- int SuccessorCount() { return hydrogen()->SuccessorCount(); }
- HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
- int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
- int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
-
- private:
- HControlInstruction* hydrogen() {
- return HControlInstruction::cast(this->hydrogen_value());
- }
-};
-
-
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
- public:
- LWrapReceiver(LOperand* receiver, LOperand* function) {
- inputs_[0] = receiver;
- inputs_[1] = function;
- }
-
- LOperand* receiver() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-};
-
-
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
- public:
- LApplyArguments(LOperand* function,
- LOperand* receiver,
- LOperand* length,
- LOperand* elements) {
- inputs_[0] = function;
- inputs_[1] = receiver;
- inputs_[2] = length;
- inputs_[3] = elements;
- }
-
- LOperand* function() { return inputs_[0]; }
- LOperand* receiver() { return inputs_[1]; }
- LOperand* length() { return inputs_[2]; }
- LOperand* elements() { return inputs_[3]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-};
-
-
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
- public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
- inputs_[0] = arguments;
- inputs_[1] = length;
- inputs_[2] = index;
- }
-
- LOperand* arguments() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
- LOperand* index() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LArgumentsLength(LOperand* elements) {
- inputs_[0] = elements;
- }
-
- LOperand* elements() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
- DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
-};
-
-
-class LModI: public LTemplateInstruction<1, 2, 1> {
- public:
- LModI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
- DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivI: public LTemplateInstruction<1, 2, 1> {
- public:
- LDivI(LOperand* left, LOperand* right, LOperand* temp) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
- DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
-};
-
-
-class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
- public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
- DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
-};
-
-
-class LMulI: public LTemplateInstruction<1, 2, 0> {
- public:
- LMulI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
- DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpIDAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->representation().IsDouble();
- }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUnaryMathOperation(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
-};
-
-
-class LMathExp: public LTemplateInstruction<1, 1, 2> {
- public:
- LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
- "cmp-object-eq-and-branch")
-};
-
-
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LCmpConstantEqAndBranch(LOperand* left) {
- inputs_[0] = left;
- }
-
- LOperand* left() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
- "cmp-constant-eq-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
-class LIsNilAndBranch: public LControlInstruction<1, 1> {
- public:
- LIsNilAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-
- EqualityKind kind() const { return hydrogen()->kind(); }
- NilValue nil() const { return hydrogen()->nil(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsObjectAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
- public:
- explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LIsSmiAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
- public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
- "is-undetectable-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
- public:
- explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
- "string-compare-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasInstanceTypeAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
- "has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
- public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
- "class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LIn: public LTemplateInstruction<1, 2, 0> {
- public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
- inputs_[1] = object;
- }
-
- LOperand* key() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
- public:
- LInstanceOf(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
- public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
- lazy_deopt_env_ = env;
- }
-
- private:
- LEnvironment* lazy_deopt_env_;
-};
-
-
-class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInstanceSize(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
- DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
-};
-
-
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
- public:
- LBoundsCheck(LOperand* index, LOperand* length) {
- inputs_[0] = index;
- inputs_[1] = length;
- }
-
- LOperand* index() { return inputs_[0]; }
- LOperand* length() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
- DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
-};
-
-
-class LBitI: public LTemplateInstruction<1, 2, 0> {
- public:
- LBitI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- Token::Value op() const { return hydrogen()->op(); }
-
- DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
- DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
- public:
- LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : op_(op), can_deopt_(can_deopt) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- bool can_deopt() const { return can_deopt_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
- Token::Value op_;
- bool can_deopt_;
-};
-
-
-class LSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantD: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LConstantD(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
- DECLARE_HYDROGEN_ACCESSOR(Constant)
-
- Handle<Object> value() const { return hydrogen()->handle(); }
-};
-
-
-class LBranch: public LControlInstruction<1, 0> {
- public:
- explicit LBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Branch)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCmpMapAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
- virtual bool IsControl() const { return true; }
-
- Handle<Map> map() const { return hydrogen()->map(); }
- int true_block_id() const {
- return hydrogen()->FirstSuccessor()->block_id();
- }
- int false_block_id() const {
- return hydrogen()->SecondSuccessor()->block_id();
- }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LJSArrayLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
- DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFixedArrayBaseLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
- "fixed-array-base-length")
- DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
-};
-
-
-class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LElementsKind(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LValueOf(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
-class LDateField: public LTemplateInstruction<1, 1, 0> {
- public:
- LDateField(LOperand* date, Smi* index) : index_(index) {
- inputs_[0] = date;
- }
-
- LOperand* date() { return inputs_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-
- private:
- Smi* index_;
-};
-
-
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
- public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
- inputs_[0] = string;
- inputs_[1] = index;
- inputs_[2] = value;
- }
-
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
-class LThrow: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LBitNotI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
- public:
- LAddI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
- DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
- public:
- LMathMinMax(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
- DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
-};
-
-
-class LPower: public LTemplateInstruction<1, 2, 0> {
- public:
- LPower(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Power, "power")
- DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LRandom: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRandom(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global_object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Random, "random")
- DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
- public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
- : op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- Token::Value op() const { return op_; }
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
- virtual void CompileToNative(LCodeGen* generator);
- virtual const char* Mnemonic() const;
-
- private:
- Token::Value op_;
-};
-
-
-class LReturn: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LReturn(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedField(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
- LOperand* object() { return inputs_[0]; }
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadNamedGeneric(LOperand* object) {
- inputs_[0] = object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- LOperand* object() { return inputs_[0]; }
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadFunctionPrototype(LOperand* function) {
- inputs_[0] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
- DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-
- LOperand* function() { return inputs_[0]; }
-};
-
-
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadElements(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadExternalArrayPointer(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
- "load-external-array-pointer")
-};
-
-
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-
- bool is_external() const {
- return hydrogen()->is_external();
- }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream);
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
-};
-
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadGlobalGeneric(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- LOperand* global_object() { return inputs_[0]; }
- Handle<Object> name() const { return hydrogen()->name(); }
- bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
- public:
- explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- explicit LStoreGlobalGeneric(LOperand* global_object,
- LOperand* value) {
- inputs_[0] = global_object;
- inputs_[1] = value;
- }
-
- LOperand* global_object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LLoadContextSlot(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
- DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
- int slot_index() { return hydrogen()->slot_index(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LPushArgument(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LDrop: public LTemplateInstruction<0, 0, 0> {
- public:
- explicit LDrop(int count) : count_(count) { }
-
- int count() const { return count_; }
-
- DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
-
- private:
- int count_;
-};
-
-
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
- DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(Context, "context")
- DECLARE_HYDROGEN_ACCESSOR(Context)
-};
-
-
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LOuterContext(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-};
-
-
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
- DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
- public:
- explicit LGlobalObject(bool qml_global) : qml_global_(qml_global) {}
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
-};
-
-
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGlobalReceiver(LOperand* global_object) {
- inputs_[0] = global_object;
- }
-
- LOperand* global() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-};
-
-
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
- DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> function() { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- LOperand* function() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
- Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
-};
-
-
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallKeyed(LOperand* key) {
- inputs_[0] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
- DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
- LOperand* key() { return inputs_[0]; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
- DECLARE_HYDROGEN_ACCESSOR(CallNamed)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
- DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
- LOperand* function() { return inputs_[0]; }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
- DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
- explicit LCallGlobal(bool qml_global) : qml_global_(qml_global) {}
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<String> name() const { return hydrogen()->name(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-
- bool qml_global() { return qml_global_; }
- private:
- bool qml_global_;
-};
-
-
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
- DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<JSFunction> target() const { return hydrogen()->target(); }
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNew(LOperand* constructor) {
- inputs_[0] = constructor;
- }
-
- LOperand* constructor() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallNewArray(LOperand* constructor) {
- inputs_[0] = constructor;
- }
-
- LOperand* constructor() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
- DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
- DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
- const Runtime::Function* function() const { return hydrogen()->function(); }
- int arity() const { return hydrogen()->argument_count(); }
-};
-
-
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToDouble(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
-};
-
-
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberTagI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LNumberTagU(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
-};
-
-
-class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LNumberTagD(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-// Sometimes truncating conversion from a double value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleToI(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
- public:
- LTaggedToI(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
- bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LSmiTag(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-};
-
-
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LNumberUntagD(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
- DECLARE_HYDROGEN_ACCESSOR(Change);
-};
-
-
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
- public:
- LSmiUntag(LOperand* value, bool needs_check)
- : needs_check_(needs_check) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
- bool needs_check() const { return needs_check_; }
-
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
- private:
- bool needs_check_;
-};
-
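SmiTag and SmiUntag reduce to 64-bit shifts on this port. A minimal sketch, assuming the x64 smi layout used by this V8 generation (the 32-bit payload sits in the upper half of the word and the low bits, including the tag, are zero); kSmiShiftSketch is an illustrative stand-in for the real constant:

#include <cstdint>

const int kSmiShiftSketch = 32;

inline int64_t SmiTagSketch(int32_t value) {
  // Integer32ToSmi: shift the payload into the upper half, tag bits stay 0.
  return static_cast<int64_t>(value) << kSmiShiftSketch;
}

inline int32_t SmiUntagSketch(int64_t smi) {
  // SmiToInteger32: arithmetic shift back down.
  return static_cast<int32_t>(smi >> kSmiShiftSketch);
}

The needs_check flag on LSmiUntag covers the case where the input might not be a smi at all, so the tag has to be tested before untagging.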
-
-class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
- public:
- LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
- inputs_[0] = object;
- inputs_[1] = value;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
-};
-
-
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
- LStoreNamedGeneric(LOperand* object, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = value;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- bool is_external() const { return hydrogen()->is_external(); }
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
-
- virtual void PrintDataTo(StringStream* stream);
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
- inputs_[0] = object;
- inputs_[1] = key;
- inputs_[2] = value;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
- virtual void PrintDataTo(StringStream* stream);
-
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
- public:
- LTransitionElementsKind(LOperand* object,
- LOperand* new_map_temp,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = new_map_temp;
- temps_[1] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_temp() { return temps_[0]; }
- LOperand* temp() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
- "transition-elements-kind")
- DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
- virtual void PrintDataTo(StringStream* stream);
-
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
- ElementsKind from_kind() { return hydrogen()->from_kind(); }
- ElementsKind to_kind() { return hydrogen()->to_kind(); }
-};
-
-
-class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
- public:
- LTrapAllocationMemento(LOperand* object,
- LOperand* temp) {
- inputs_[0] = object;
- temps_[0] = temp;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
- "trap-allocation-memento")
-};
-
-
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-};
-
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
- }
-
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-};
-
-
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
- }
-
- LOperand* char_code() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
- DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-};
-
-
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LStringLength(LOperand* string) {
- inputs_[0] = string;
- }
-
- LOperand* string() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
- DECLARE_HYDROGEN_ACCESSOR(StringLength)
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckFunction(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
- DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
-};
-
-
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckMaps(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
-};
-
-
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LCheckPrototypeMaps(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
- DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
- ZoneList<Handle<JSObject> >* prototypes() const {
- return hydrogen()->prototypes();
- }
- ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampDToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClampIToUint8(LOperand* unclamped) {
- inputs_[0] = unclamped;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
- public:
- LClampTToUint8(LOperand* unclamped,
- LOperand* temp_xmm) {
- inputs_[0] = unclamped;
- temps_[0] = temp_xmm;
- }
-
- LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp_xmm() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LCheckNonSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-};
-
-
-class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
- public:
- explicit LAllocateObject(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
- DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LAllocate: public LTemplateInstruction<1, 1, 1> {
- public:
- LAllocate(LOperand* size, LOperand* temp) {
- inputs_[0] = size;
- temps_[0] = temp;
- }
-
- LOperand* size() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
- DECLARE_HYDROGEN_ACCESSOR(Allocate)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
-};
-
-
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LToFastProperties(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
- DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
- public:
- explicit LTypeofIsAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCallAndBranch)
-};
-
-
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
- LOsrEntry();
-
- DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
- LOperand** SpilledRegisterArray() { return register_spills_; }
- LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
- void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
- void MarkSpilledDoubleRegister(int allocation_index,
- LOperand* spill_operand);
-
- private:
- // Arrays of spill slot operands for registers with an assigned spill
- // slot, i.e., that must also be restored to the spill slot on OSR entry.
- // NULL if the register has no assigned spill slot. Indexed by allocation
- // index.
- LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
- LOperand* double_register_spills_[
- DoubleRegister::kMaxNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
- DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
- Label* done_label() { return &done_label_; }
-
- private:
- Label done_label_;
-};
-
-
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInPrepareMap(LOperand* object) {
- inputs_[0] = object;
- }
-
- LOperand* object() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LForInCacheArray(LOperand* map) {
- inputs_[0] = map;
- }
-
- LOperand* map() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
- int idx() {
- return HForInCacheArray::cast(this->hydrogen_value())->idx();
- }
-};
-
-
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
- public:
- LCheckMapValue(LOperand* value, LOperand* map) {
- inputs_[0] = value;
- inputs_[1] = map;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* map() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadFieldByIndex(LOperand* object, LOperand* index) {
- inputs_[0] = object;
- inputs_[1] = index;
- }
-
- LOperand* object() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
-class LPlatformChunk: public LChunk {
- public:
- LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
-
- int GetNextSpillIndex(bool is_double);
- LOperand* GetNextSpillSlot(bool is_double);
-};
-
-
-class LChunkBuilder BASE_EMBEDDED {
- public:
- LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
- : chunk_(NULL),
- info_(info),
- graph_(graph),
- zone_(graph->zone()),
- status_(UNUSED),
- current_instruction_(NULL),
- current_block_(NULL),
- next_block_(NULL),
- argument_count_(0),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
-
- // Build the sequence for the graph.
- LPlatformChunk* Build();
-
- // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
- HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
- static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
-
- private:
- enum Status {
- UNUSED,
- BUILDING,
- DONE,
- ABORTED
- };
-
- LPlatformChunk* chunk() const { return chunk_; }
- CompilationInfo* info() const { return info_; }
- HGraph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
-
- bool is_unused() const { return status_ == UNUSED; }
- bool is_building() const { return status_ == BUILDING; }
- bool is_done() const { return status_ == DONE; }
- bool is_aborted() const { return status_ == ABORTED; }
-
- void Abort(const char* reason);
-
- // Methods for getting operands for Use / Define / Temp.
- LUnallocated* ToUnallocated(Register reg);
- LUnallocated* ToUnallocated(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
- MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
- MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
- XMMRegister fixed_register);
-
- // A value that is guaranteed to be allocated to a register.
- // An operand created by UseRegister is guaranteed to be live until the end
- // of the instruction. This means that the register allocator will not reuse
- // its register for any other operand inside the instruction.
- // An operand created by UseRegisterAtStart is guaranteed to be live only at
- // the start of the instruction. The register allocator is free to assign the
- // same register to some other operand used inside the instruction (i.e. a
- // temporary or the output).
- MUST_USE_RESULT LOperand* UseRegister(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
- // An input operand in a register that may be trashed.
- MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
- // An input operand in a register or stack slot.
- MUST_USE_RESULT LOperand* Use(HValue* value);
- MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
- // An input operand in a register, stack slot or a constant operand.
- MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
- // An input operand in a register or a constant operand.
- MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
- MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
- // An input operand in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
- // Temporary operand that must be in a register.
- MUST_USE_RESULT LUnallocated* TempRegister();
- MUST_USE_RESULT LOperand* FixedTemp(Register reg);
- MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
-
- // Methods for setting up define-use relationships.
- // Return the same instruction that they are passed.
- template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
- LUnallocated* result);
- template<int I, int T>
- LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
- int index);
- template<int I, int T>
- LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
- LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
- Register reg);
- template<int I, int T>
- LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
- XMMRegister reg);
- // Assigns an environment to an instruction. An instruction which can
- // deoptimize must have an environment.
- LInstruction* AssignEnvironment(LInstruction* instr);
- // Assigns a pointer map to an instruction. An instruction which can
- // trigger a GC or a lazy deoptimization must have a pointer map.
- LInstruction* AssignPointerMap(LInstruction* instr);
-
- enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
- // Marks a call for the register allocator. Assigns a pointer map to
- // support GC and lazy deoptimization. Assigns an environment to support
- // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
- LInstruction* MarkAsCall(
- LInstruction* instr,
- HInstruction* hinstr,
- CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
- int* argument_index_accumulator);
-
- void VisitInstruction(HInstruction* current);
-
- void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
- LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
- LInstruction* DoArithmeticD(Token::Value op,
- HArithmeticBinaryOperation* instr);
- LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
-
- LPlatformChunk* chunk_;
- CompilationInfo* info_;
- HGraph* const graph_;
- Zone* zone_;
- Status status_;
- HInstruction* current_instruction_;
- HBasicBlock* current_block_;
- HBasicBlock* next_block_;
- int argument_count_;
- LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
-
- DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
-};
-
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
-
-} }  // namespace v8::internal
-
-#endif // V8_X64_LITHIUM_X64_H_
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc b/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
deleted file mode 100644
index 5f467e3..0000000
--- a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
+++ /dev/null
@@ -1,4637 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "assembler-x64.h"
-#include "macro-assembler-x64.h"
-#include "serialize.h"
-#include "debug.h"
-#include "heap.h"
-
-namespace v8 {
-namespace internal {
-
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
- : Assembler(arg_isolate, buffer, size),
- generating_stub_(false),
- allow_stub_calls_(true),
- has_frame_(false),
- root_array_available_(true) {
- if (isolate() != NULL) {
- code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
- isolate());
- }
-}
-
-
-static const int kInvalidRootRegisterDelta = -1;
-
-
-intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
- if (predictable_code_size() &&
- (other.address() < reinterpret_cast<Address>(isolate()) ||
- other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
- return kInvalidRootRegisterDelta;
- }
- Address roots_register_value = kRootRegisterBias +
- reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
- intptr_t delta = other.address() - roots_register_value;
- return delta;
-}
-
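The delta computed above is what lets ExternalOperand, Load, Store and LoadAddress below reach isolate-local data as a 32-bit displacement off kRootRegister instead of materializing a 64-bit immediate. A plain-C++ sketch of the same arithmetic (parameter names are illustrative):

#include <cstdint>

inline int64_t RootRegisterDeltaSketch(uintptr_t target_address,
                                       uintptr_t roots_array_start,
                                       intptr_t root_register_bias) {
  // kRootRegister holds roots_array_start + kRootRegisterBias at run time.
  uintptr_t root_register_value = roots_array_start + root_register_bias;
  return static_cast<int64_t>(target_address) -
         static_cast<int64_t>(root_register_value);
}

inline bool FitsRootRelativeSketch(int64_t delta) {
  return delta >= INT32_MIN && delta <= INT32_MAX;  // is_int32(delta)
}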
-
-Operand MacroAssembler::ExternalOperand(ExternalReference target,
- Register scratch) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(target);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- return Operand(kRootRegister, static_cast<int32_t>(delta));
- }
- }
- movq(scratch, target);
- return Operand(scratch, 0);
-}
-
-
-void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
- return;
- }
- }
- // Safe code.
- if (destination.is(rax)) {
- load_rax(source);
- } else {
- movq(kScratchRegister, source);
- movq(destination, Operand(kScratchRegister, 0));
- }
-}
-
-
-void MacroAssembler::Store(ExternalReference destination, Register source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(destination);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
- return;
- }
- }
- // Safe code.
- if (source.is(rax)) {
- store_rax(destination);
- } else {
- movq(kScratchRegister, destination);
- movq(Operand(kScratchRegister, 0), source);
- }
-}
-
-
-void MacroAssembler::LoadAddress(Register destination,
- ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- intptr_t delta = RootRegisterDelta(source);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
- return;
- }
- }
- // Safe code.
- movq(destination, source);
-}
-
-
-int MacroAssembler::LoadAddressSize(ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled()) {
- // This calculation depends on the internals of LoadAddress.
- // Its correctness is ensured by the asserts in the Call
- // instruction below.
- intptr_t delta = RootRegisterDelta(source);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- Serializer::TooLateToEnableNow();
- // Operand is lea(scratch, Operand(kRootRegister, delta));
- // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
- int size = 4;
- if (!is_int8(static_cast<int32_t>(delta))) {
- size += 3; // Need full four-byte displacement in lea.
- }
- return size;
- }
- }
- // Size of movq(destination, src);
- return 10;
-}
-
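The size arithmetic above in one place: an lea off kRootRegister is REX.W (1) + opcode 0x8D (1) + ModRM (1) + disp8 (1) = 4 bytes, growing by 3 when a full 4-byte displacement is needed, while a movq of a 64-bit immediate is 10 bytes. A small sketch of the same computation:

#include <climits>
#include <cstdint>

inline bool FitsInInt8Sketch(int32_t d) {
  return d >= SCHAR_MIN && d <= SCHAR_MAX;
}

inline int LoadAddressSizeSketch(bool root_relative, int32_t delta) {
  if (root_relative) {
    return FitsInInt8Sketch(delta) ? 4 : 7;  // lea with disp8 or disp32
  }
  return 10;  // movq reg, imm64
}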
-
-void MacroAssembler::PushAddress(ExternalReference source) {
- int64_t address = reinterpret_cast<int64_t>(source.address());
- if (is_int32(address) && !Serializer::enabled()) {
- if (emit_debug_code()) {
- movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- }
- push(Immediate(static_cast<int32_t>(address)));
- return;
- }
- LoadAddress(kScratchRegister, source);
- push(kScratchRegister);
-}
-
-
-void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- movq(destination, Operand(kRootRegister,
- (index << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
-void MacroAssembler::LoadRootIndexed(Register destination,
- Register variable_offset,
- int fixed_offset) {
- ASSERT(root_array_available_);
- movq(destination,
- Operand(kRootRegister,
- variable_offset, times_pointer_size,
- (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
-void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
- source);
-}
-
-
-void MacroAssembler::PushRoot(Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
-void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- cmpq(with, Operand(kRootRegister,
- (index << kPointerSizeLog2) - kRootRegisterBias));
-}
-
-
-void MacroAssembler::CompareRoot(const Operand& with,
- Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- ASSERT(!with.AddressUsesRegister(kScratchRegister));
- LoadRoot(kScratchRegister, index);
- cmpq(with, kScratchRegister);
-}
-
-
-void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then) {
- if (emit_debug_code()) {
- Label ok;
- JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
- // Load store buffer top.
- LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
- // Store pointer to buffer.
- movq(Operand(scratch, 0), addr);
- // Increment buffer top.
- addq(scratch, Immediate(kPointerSize));
- // Write back new top of buffer.
- StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
- // Call stub on end of buffer.
- Label done;
- // Check for end of buffer.
- testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
- if (and_then == kReturnAtEnd) {
- Label buffer_overflowed;
- j(not_equal, &buffer_overflowed, Label::kNear);
- ret(0);
- bind(&buffer_overflowed);
- } else {
- ASSERT(and_then == kFallThroughAtEnd);
- j(equal, &done, Label::kNear);
- }
- StoreBufferOverflowStub store_buffer_overflow =
- StoreBufferOverflowStub(save_fp);
- CallStub(&store_buffer_overflow);
- if (and_then == kReturnAtEnd) {
- ret(0);
- } else {
- ASSERT(and_then == kFallThroughAtEnd);
- bind(&done);
- }
-}
-
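A plain-C++ sketch of what RememberedSetHelper emits: append the written slot's address to the store buffer, bump the buffer top by one word, and run the overflow stub only when the new top crosses the overflow bit. The struct and names below are illustrative, not the real V8 runtime types:

#include <cstdint>

struct StoreBufferSketch {
  void*** top_slot;        // root-list slot holding the current buffer top
  uintptr_t overflow_bit;  // StoreBuffer::kStoreBufferOverflowBit analogue
};

inline bool RecordSlotSketch(StoreBufferSketch* sb, void** slot_address) {
  void** top = *sb->top_slot;
  *top = slot_address;     // movq [scratch], addr
  top += 1;                // addq scratch, kPointerSize
  *sb->top_slot = top;     // StoreRoot(scratch, kStoreBufferTopRootIndex)
  // True when the caller must run StoreBufferOverflowStub.
  return (reinterpret_cast<uintptr_t>(top) & sb->overflow_bit) != 0;
}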
-
-void MacroAssembler::InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch,
- Label::Distance distance) {
- if (Serializer::enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- if (scratch.is(object)) {
- movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
- and_(scratch, kScratchRegister);
- } else {
- movq(scratch, ExternalReference::new_space_mask(isolate()));
- and_(scratch, object);
- }
- movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
- cmpq(scratch, kScratchRegister);
- j(cc, branch, distance);
- } else {
- ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
- intptr_t new_space_start =
- reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
- if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
- } else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
- }
- and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
- j(cc, branch, distance);
- }
-}
-
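Stripped of the serializer special case, the test above is a mask-and-compare: an address is in the young generation exactly when (address - new_space_start) has no bits outside the new-space mask, and the cc/branch arguments decide which outcome jumps. A sketch of the predicate, with illustrative constant names:

#include <cstdint>

inline bool InNewSpaceSketch(uintptr_t object_address,
                             uintptr_t new_space_start,
                             uintptr_t new_space_mask) {
  return ((object_address - new_space_start) & new_space_mask) == 0;
}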
-
-void MacroAssembler::RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register dst,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!value.is(rsi) && !dst.is(rsi));
-
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done);
- }
-
- // Although the object register is tagged, the offset is relative to the start
- // of the object, so the offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
-
- lea(dst, FieldOperand(object, offset));
- if (emit_debug_code()) {
- Label ok;
- testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- }
-}
-
-
-void MacroAssembler::RecordWriteArray(Register object,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // First, check if a write barrier is even needed. The tests below
- // catch stores of Smis.
- Label done;
-
- // Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
- JumpIfSmi(value, &done);
- }
-
- // Array access: calculate the destination address. Index is not a smi.
- Register dst = index;
- lea(dst, Operand(object, index, times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
-
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
- bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- }
-}
-
-
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- // The compiled code assumes that record write doesn't change the
- // context register, so we check that none of the clobbered
- // registers are rsi.
- ASSERT(!value.is(rsi) && !address.is(rsi));
-
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
- AssertNotSmi(object);
-
- if (remembered_set_action == OMIT_REMEMBERED_SET &&
- !FLAG_incremental_marking) {
- return;
- }
-
- if (emit_debug_code()) {
- Label ok;
- cmpq(value, Operand(address, 0));
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
-
- // First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
- Label done;
-
- if (smi_check == INLINE_SMI_CHECK) {
- // Skip barrier if writing a smi.
- JumpIfSmi(value, &done);
- }
-
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
-
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
-
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
- CallStub(&stub);
-
- bind(&done);
-
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- }
-}
-
-
-void MacroAssembler::Assert(Condition cc, const char* msg) {
- if (emit_debug_code()) Check(cc, msg);
-}
-
-
-void MacroAssembler::AssertFastElements(Register elements) {
- if (emit_debug_code()) {
- Label ok;
- CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- j(equal, &ok, Label::kNear);
- CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedDoubleArrayMapRootIndex);
- j(equal, &ok, Label::kNear);
- CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- j(equal, &ok, Label::kNear);
- Abort("JSObject with fast elements map has slow elements");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::Check(Condition cc, const char* msg) {
- Label L;
- j(cc, &L, Label::kNear);
- Abort(msg);
- // Control will not return here.
- bind(&L);
-}
-
-
-void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
- Label alignment_as_expected;
- testq(rsp, Immediate(frame_alignment_mask));
- j(zero, &alignment_as_expected, Label::kNear);
- // Abort if stack is not aligned.
- int3();
- bind(&alignment_as_expected);
- }
-}
-
-
-void MacroAssembler::NegativeZeroTest(Register result,
- Register op,
- Label* then_label) {
- Label ok;
- testl(result, result);
- j(not_zero, &ok, Label::kNear);
- testl(op, op);
- j(sign, then_label);
- bind(&ok);
-}
-
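NegativeZeroTest branches to then_label only in the corner case where the integer result is zero but the checked operand is negative, i.e. the true result would have been -0 rather than +0. The equivalent C-level predicate:

#include <cstdint>

inline bool WouldBeNegativeZeroSketch(int32_t result, int32_t op) {
  // Matches the emitted code: test the sign of op only when result == 0.
  return result == 0 && op < 0;
}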
-
-void MacroAssembler::Abort(const char* msg) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
-#ifdef DEBUG
- if (msg != NULL) {
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-#endif
- push(rax);
- movq(kScratchRegister, p0, RelocInfo::NONE64);
- push(kScratchRegister);
- movq(kScratchRegister,
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
- RelocInfo::NONE64);
- push(kScratchRegister);
-
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
- } else {
- CallRuntime(Runtime::kAbort, 2);
- }
- // Control will not return here.
- int3();
-}
-
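The pointer-smuggling trick in Abort() in isolation: the message pointer p1 may not be smi-aligned, so the code pushes a tag-cleared p0 (which the GC will treat as a smi) plus the small difference p1 - p0 encoded as a genuine Smi, and the runtime adds them back together. A worked sketch, assuming kSmiTag == 0 and kSmiTagMask == 1 as on this port:

#include <cassert>
#include <cstdint>

int main() {
  const char* msg = "some abort message";
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~intptr_t(1)) + 0;  // clear the tag bit; looks like a smi
  intptr_t delta = p1 - p0;               // 0 or 1, passed as Smi::FromInt(delta)
  assert(p0 + delta == p1);               // the runtime recovers the real pointer
  return 0;
}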
-
-void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
- Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
-}
-
-
-void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::StubReturn(int argc) {
- ASSERT(argc >= 1 && generating_stub());
- ret((argc - 1) * kPointerSize);
-}
-
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
- if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
- return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addq(rsp, Immediate(num_arguments * kPointerSize));
- }
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-}
-
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it do not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. Even if we subsequently go to
- // the slow case, converting the key to a smi is always valid.
- // key: string key
- // hash: key's hash field, including its array index value.
- and_(hash, Immediate(String::kArrayIndexValueMask));
- shr(hash, Immediate(String::kHashShift));
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- Integer32ToSmi(index, hash);
-}
-
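IndexFromHash as straight-line C++: mask out the cached array-index bits of the hash field, shift them down to get the numeric index, then smi-tag it. The parameters are illustrative stand-ins for the String hash-field constants used above:

#include <cstdint>

inline int64_t IndexFromHashSketch(uint32_t hash_field,
                                   uint32_t array_index_value_mask,
                                   int hash_shift,
                                   int smi_shift /* 32 on x64 */) {
  uint32_t index = (hash_field & array_index_value_mask) >> hash_shift;
  return static_cast<int64_t>(index) << smi_shift;  // Integer32ToSmi(index)
}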
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
- Set(rax, function->nargs);
- LoadAddress(rbx, ExternalReference(function, isolate()));
- CEntryStub ces(1, kSaveFPRegs);
- CallStub(&ces);
-}
-
-
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments) {
- // If the expected number of arguments of the runtime function is
- // constant, we check that the actual number of arguments matches the
- // expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- LoadAddress(rbx, ExternalReference(f, isolate()));
- CEntryStub ces(f->result_size);
- CallStub(&ces);
-}
-
-
-void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments) {
- Set(rax, num_arguments);
- LoadAddress(rbx, ext);
-
- CEntryStub stub(1);
- CallStub(&stub);
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : argument num_arguments - 1
- // ...
- // -- rsp[8 * num_arguments] : argument 0 (receiver)
- // -----------------------------------
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- JumpToExternalReference(ext, result_size);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
-}
-
-
-static int Offset(ExternalReference ref0, ExternalReference ref1) {
- int64_t offset = (ref0.address() - ref1.address());
- // Check that the offset fits into an int.
- ASSERT(static_cast<int>(offset) == offset);
- return static_cast<int>(offset);
-}
-
-
-void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
-#if defined(_WIN64) && !defined(__MINGW64__)
- // We need to prepare a slot for the result handle on the stack and put
- // a pointer to it into the 1st arg register.
- EnterApiExitFrame(arg_stack_space + 1);
-
- // rcx must be used to pass the pointer to the return value slot.
- lea(rcx, StackSpaceOperand(arg_stack_space));
-#else
- EnterApiExitFrame(arg_stack_space);
-#endif
-}
-
-
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- int stack_space) {
- Label empty_result;
- Label prologue;
- Label promote_scheduled_exception;
- Label delete_allocated_handles;
- Label leave_exit_frame;
- Label write_back;
-
- Factory* factory = isolate()->factory();
- ExternalReference next_address =
- ExternalReference::handle_scope_next_address(isolate());
- const int kNextOffset = 0;
- const int kLimitOffset = Offset(
- ExternalReference::handle_scope_limit_address(isolate()),
- next_address);
- const int kLevelOffset = Offset(
- ExternalReference::handle_scope_level_address(isolate()),
- next_address);
- ExternalReference scheduled_exception_address =
- ExternalReference::scheduled_exception_address(isolate());
-
- // Allocate HandleScope in callee-save registers.
- Register prev_next_address_reg = r14;
- Register prev_limit_reg = rbx;
- Register base_reg = r15;
- movq(base_reg, next_address);
- movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
- movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
- addl(Operand(base_reg, kLevelOffset), Immediate(1));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0);
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
- // Call the api function!
- movq(rax, reinterpret_cast<int64_t>(function_address),
- RelocInfo::EXTERNAL_REFERENCE);
- call(rax);
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0);
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
-#if defined(_WIN64) && !defined(__MINGW64__)
- // rax keeps a pointer to v8::Handle, unpack it.
- movq(rax, Operand(rax, 0));
-#endif
- // Check if the result handle holds 0.
- testq(rax, rax);
- j(zero, &empty_result);
- // It was non-zero. Dereference to get the result value.
- movq(rax, Operand(rax, 0));
- bind(&prologue);
-
- // No more valid handles (the result handle was the last one). Restore
- // previous handle scope.
- subl(Operand(base_reg, kLevelOffset), Immediate(1));
- movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
- cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
- j(not_equal, &delete_allocated_handles);
- bind(&leave_exit_frame);
-
- // Check if the function scheduled an exception.
- movq(rsi, scheduled_exception_address);
- Cmp(Operand(rsi, 0), factory->the_hole_value());
- j(not_equal, &promote_scheduled_exception);
-
-#if ENABLE_EXTRA_CHECKS
- // Check if the function returned a valid JavaScript value.
- Label ok;
- Register return_value = rax;
- Register map = rcx;
-
- JumpIfSmi(return_value, &ok, Label::kNear);
- movq(map, FieldOperand(return_value, HeapObject::kMapOffset));
-
- CmpInstanceType(map, FIRST_NONSTRING_TYPE);
- j(below, &ok, Label::kNear);
-
- CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
- j(above_equal, &ok, Label::kNear);
-
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- j(equal, &ok, Label::kNear);
-
- CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
- j(equal, &ok, Label::kNear);
-
- CompareRoot(return_value, Heap::kTrueValueRootIndex);
- j(equal, &ok, Label::kNear);
-
- CompareRoot(return_value, Heap::kFalseValueRootIndex);
- j(equal, &ok, Label::kNear);
-
- CompareRoot(return_value, Heap::kNullValueRootIndex);
- j(equal, &ok, Label::kNear);
-
- Abort("API call returned invalid object");
-
- bind(&ok);
-#endif
-
- LeaveApiExitFrame();
- ret(stack_space * kPointerSize);
-
- bind(&empty_result);
- // It was zero; the result is undefined.
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- jmp(&prologue);
-
- bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
-
- // HandleScope limit has changed. Delete allocated extensions.
- bind(&delete_allocated_handles);
- movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
- movq(prev_limit_reg, rax);
-#ifdef _WIN64
- LoadAddress(rcx, ExternalReference::isolate_address());
-#else
- LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
- LoadAddress(rax,
- ExternalReference::delete_handle_scope_extensions(isolate()));
- call(rax);
- movq(rax, prev_limit_reg);
- jmp(&leave_exit_frame);
-}
-
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
- int result_size) {
- // Set the entry point and jump to the C entry runtime stub.
- LoadAddress(rbx, ext);
- CEntryStub ces(result_size);
- jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
-}
-
-
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
-  // Rely on the assertion to check that the number of provided
-  // arguments matches the expected number of arguments. Fake a
-  // parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- GetBuiltinEntry(rdx, id);
- InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
- // Load the builtins object into target register.
- movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- movq(target, FieldOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(rdi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(rdi, id);
- movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-}
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const Register saved_regs[] = {
- REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
- REG(r9), REG(r10), REG(r11)
-};
-
-#undef REG
-
-static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
-
-
-void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1,
- Register exclusion2,
- Register exclusion3) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- for (int i = 0; i < kNumberOfSavedRegs; i++) {
- Register reg = saved_regs[i];
- if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- push(reg);
- }
- }
-  // r12 to r15 are callee-saved on all platforms.
- if (fp_mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movsd(Operand(rsp, i * kDoubleSize), reg);
- }
- }
-}
-
-
-void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1,
- Register exclusion2,
- Register exclusion3) {
- if (fp_mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- movsd(reg, Operand(rsp, i * kDoubleSize));
- }
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
- }
- for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
- Register reg = saved_regs[i];
- if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- pop(reg);
- }
- }
-}
-
-
-void MacroAssembler::Set(Register dst, int64_t x) {
- if (x == 0) {
- xorl(dst, dst);
- } else if (is_uint32(x)) {
- movl(dst, Immediate(static_cast<uint32_t>(x)));
- } else if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
- } else {
- movq(dst, x, RelocInfo::NONE64);
- }
-}
-
-void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
- } else {
- Set(kScratchRegister, x);
- movq(dst, kScratchRegister);
- }
-}
-
-
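-// Immediates that do not fit in kMaxBits (17) signed bits are considered
-// unsafe: SafeMove and SafePush below XOR such values with the JIT cookie so
-// that attacker-controlled constants do not appear verbatim in generated code.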
-bool MacroAssembler::IsUnsafeInt(const int x) {
- static const int kMaxBits = 17;
- return !is_intn(x, kMaxBits);
-}
-
-
-void MacroAssembler::SafeMove(Register dst, Smi* src) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
- if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
- Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
- Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(dst, kScratchRegister);
- } else {
- Move(dst, src);
- }
-}
-
-
-void MacroAssembler::SafePush(Smi* src) {
- ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
- if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
- Push(Smi::FromInt(src->value() ^ jit_cookie()));
- Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(Operand(rsp, 0), kScratchRegister);
- } else {
- Push(src);
- }
-}
-
-
-// ----------------------------------------------------------------------------
-// Smi tagging, untagging and tag detection.
-
-Register MacroAssembler::GetSmiConstant(Smi* source) {
- int value = source->value();
- if (value == 0) {
- xorl(kScratchRegister, kScratchRegister);
- return kScratchRegister;
- }
- if (value == 1) {
- return kSmiConstantRegister;
- }
- LoadSmiConstant(kScratchRegister, source);
- return kScratchRegister;
-}
-
-void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
- if (emit_debug_code()) {
- movq(dst,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE64);
- cmpq(dst, kSmiConstantRegister);
- if (allow_stub_calls()) {
- Assert(equal, "Uninitialized kSmiConstantRegister");
- } else {
- Label ok;
- j(equal, &ok, Label::kNear);
- int3();
- bind(&ok);
- }
- }
- int value = source->value();
- if (value == 0) {
- xorl(dst, dst);
- return;
- }
- bool negative = value < 0;
- unsigned int uvalue = negative ? -value : value;
-
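-  // kSmiConstantRegister holds Smi::FromInt(1), so small constants can be
-  // synthesized with scaled lea instructions instead of a full 64-bit movq.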
- switch (uvalue) {
- case 9:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
- break;
- case 8:
- xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
- break;
- case 4:
- xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
- break;
- case 5:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
- break;
- case 3:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
- break;
- case 2:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
- break;
- case 1:
- movq(dst, kSmiConstantRegister);
- break;
- case 0:
- UNREACHABLE();
- return;
- default:
- movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
- return;
- }
- if (negative) {
- neg(dst);
- }
-}
-
-
-void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (!dst.is(src)) {
- movl(dst, src);
- }
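-  // With kSmiValueSize == 32, tagging is a single shift: the value occupies
-  // the upper 32 bits and the low 32 bits (including the tag bit) are zero.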
- shl(dst, Immediate(kSmiShift));
-}
-
-
-void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
- if (emit_debug_code()) {
- testb(dst, Immediate(0x01));
- Label ok;
- j(zero, &ok, Label::kNear);
- if (allow_stub_calls()) {
- Abort("Integer32ToSmiField writing to non-smi location");
- } else {
- int3();
- }
- bind(&ok);
- }
- ASSERT(kSmiShift % kBitsPerByte == 0);
- movl(Operand(dst, kSmiShift / kBitsPerByte), src);
-}
-
-
-void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
- Register src,
- int constant) {
- if (dst.is(src)) {
- addl(dst, Immediate(constant));
- } else {
- leal(dst, Operand(src, constant));
- }
- shl(dst, Immediate(kSmiShift));
-}
-
-
-void MacroAssembler::SmiToInteger32(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (!dst.is(src)) {
- movq(dst, src);
- }
- shr(dst, Immediate(kSmiShift));
-}
-
-
-void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
- movl(dst, Operand(src, kSmiShift / kBitsPerByte));
-}
-
-
-void MacroAssembler::SmiToInteger64(Register dst, Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- if (!dst.is(src)) {
- movq(dst, src);
- }
- sar(dst, Immediate(kSmiShift));
-}
-
-
-void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
- movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
-}
-
-
-void MacroAssembler::SmiTest(Register src) {
- testq(src, src);
-}
-
-
-void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
- AssertSmi(smi1);
- AssertSmi(smi2);
- cmpq(smi1, smi2);
-}
-
-
-void MacroAssembler::SmiCompare(Register dst, Smi* src) {
- AssertSmi(dst);
- Cmp(dst, src);
-}
-
-
-void MacroAssembler::Cmp(Register dst, Smi* src) {
- ASSERT(!dst.is(kScratchRegister));
- if (src->value() == 0) {
- testq(dst, dst);
- } else {
- Register constant_reg = GetSmiConstant(src);
- cmpq(dst, constant_reg);
- }
-}
-
-
-void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
- AssertSmi(dst);
- AssertSmi(src);
- cmpq(dst, src);
-}
-
-
-void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
- AssertSmi(dst);
- AssertSmi(src);
- cmpq(dst, src);
-}
-
-
-void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
- AssertSmi(dst);
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
-}
-
-
-void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
- // The Operand cannot use the smi register.
- Register smi_reg = GetSmiConstant(src);
- ASSERT(!dst.AddressUsesRegister(smi_reg));
- cmpq(dst, smi_reg);
-}
-
-
-void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
-}
-
-
-void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
- Register src,
- int power) {
- ASSERT(power >= 0);
- ASSERT(power < 64);
- if (power == 0) {
- SmiToInteger64(dst, src);
- return;
- }
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (power < kSmiShift) {
- sar(dst, Immediate(kSmiShift - power));
- } else if (power > kSmiShift) {
- shl(dst, Immediate(power - kSmiShift));
- }
-}
-
-
-void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
- Register src,
- int power) {
- ASSERT((0 <= power) && (power < 32));
- if (dst.is(src)) {
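-    // Untagging (shift right by kSmiShift) and dividing by 2^power fold into
-    // a single logical shift; this relies on the smi being non-negative.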
- shr(dst, Immediate(power + kSmiShift));
- } else {
- UNIMPLEMENTED(); // Not used.
- }
-}
-
-
-void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
- Label* on_not_smis,
- Label::Distance near_jump) {
- if (dst.is(src1) || dst.is(src2)) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- movq(kScratchRegister, src1);
- or_(kScratchRegister, src2);
- JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- or_(dst, src2);
- JumpIfNotSmi(dst, on_not_smis, near_jump);
- }
-}
-
-
-Condition MacroAssembler::CheckSmi(Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- testb(src, Immediate(kSmiTagMask));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckSmi(const Operand& src) {
- STATIC_ASSERT(kSmiTag == 0);
- testb(src, Immediate(kSmiTagMask));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
- STATIC_ASSERT(kSmiTag == 0);
- // Test that both bits of the mask 0x8000000000000001 are zero.
- movq(kScratchRegister, src);
- rol(kScratchRegister, Immediate(1));
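-  // The rotate moves the sign bit into bit 0 and the tag bit into bit 1, so
-  // testing the low two bits checks both at once.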
- testb(kScratchRegister, Immediate(3));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
- if (first.is(second)) {
- return CheckSmi(first);
- }
- STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
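-  // Smis have their low 32 bits clear while heap object pointers have low
-  // bits 01, so the low two bits of first + second are zero only when both
-  // operands are smis.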
- leal(kScratchRegister, Operand(first, second, times_1, 0));
- testb(kScratchRegister, Immediate(0x03));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
- Register second) {
- if (first.is(second)) {
- return CheckNonNegativeSmi(first);
- }
- movq(kScratchRegister, first);
- or_(kScratchRegister, second);
- rol(kScratchRegister, Immediate(1));
- testl(kScratchRegister, Immediate(3));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckEitherSmi(Register first,
- Register second,
- Register scratch) {
- if (first.is(second)) {
- return CheckSmi(first);
- }
- if (scratch.is(second)) {
- andl(scratch, first);
- } else {
- if (!scratch.is(first)) {
- movl(scratch, first);
- }
- andl(scratch, second);
- }
- testb(scratch, Immediate(kSmiTagMask));
- return zero;
-}
-
-
-Condition MacroAssembler::CheckIsMinSmi(Register src) {
- ASSERT(!src.is(kScratchRegister));
- // If we overflow by subtracting one, it's the minimal smi value.
- cmpq(src, kSmiConstantRegister);
- return overflow;
-}
-
-
-Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
- // A 32-bit integer value can always be converted to a smi.
- return always;
-}
-
-
-Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
- // An unsigned 32-bit integer value is valid as long as the high bit
- // is not set.
- testl(src, src);
- return positive;
-}
-
-
-void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
- if (dst.is(src)) {
- andl(dst, Immediate(kSmiTagMask));
- } else {
- movl(dst, Immediate(kSmiTagMask));
- andl(dst, src);
- }
-}
-
-
-void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
- if (!(src.AddressUsesRegister(dst))) {
- movl(dst, Immediate(kSmiTagMask));
- andl(dst, src);
- } else {
- movl(dst, src);
- andl(dst, Immediate(kSmiTagMask));
- }
-}
-
-
-void MacroAssembler::JumpIfNotValidSmiValue(Register src,
- Label* on_invalid,
- Label::Distance near_jump) {
- Condition is_valid = CheckInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid, near_jump);
-}
-
-
-void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
- Label* on_invalid,
- Label::Distance near_jump) {
- Condition is_valid = CheckUInteger32ValidSmiValue(src);
- j(NegateCondition(is_valid), on_invalid, near_jump);
-}
-
-
-void MacroAssembler::JumpIfSmi(Register src,
- Label* on_smi,
- Label::Distance near_jump) {
- Condition smi = CheckSmi(src);
- j(smi, on_smi, near_jump);
-}
-
-
-void MacroAssembler::JumpIfNotSmi(Register src,
- Label* on_not_smi,
- Label::Distance near_jump) {
- Condition smi = CheckSmi(src);
- j(NegateCondition(smi), on_not_smi, near_jump);
-}
-
-
-void MacroAssembler::JumpUnlessNonNegativeSmi(
- Register src, Label* on_not_smi_or_negative,
- Label::Distance near_jump) {
- Condition non_negative_smi = CheckNonNegativeSmi(src);
- j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
-}
-
-
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
- Smi* constant,
- Label* on_equals,
- Label::Distance near_jump) {
- SmiCompare(src, constant);
- j(equal, on_equals, near_jump);
-}
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register src1,
- Register src2,
- Label* on_not_both_smi,
- Label::Distance near_jump) {
- Condition both_smi = CheckBothSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi, near_jump);
-}
-
-
-void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
- Register src2,
- Label* on_not_both_smi,
- Label::Distance near_jump) {
- Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
- j(NegateCondition(both_smi), on_not_both_smi, near_jump);
-}
-
-
-void MacroAssembler::SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- // Does not assume that src is a smi.
- ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
-
- JumpIfNotSmi(src, on_not_smi_result, near_jump);
- Register tmp = (dst.is(src) ? kScratchRegister : dst);
- LoadSmiConstant(tmp, constant);
- addq(tmp, src);
- j(overflow, on_not_smi_result, near_jump);
- if (dst.is(src)) {
- movq(dst, tmp);
- }
-}
-
-
-void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- return;
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- switch (constant->value()) {
- case 1:
- addq(dst, kSmiConstantRegister);
- return;
- case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
- return;
- case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
- return;
- case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
- return;
- default:
- Register constant_reg = GetSmiConstant(constant);
- addq(dst, constant_reg);
- return;
- }
- } else {
- switch (constant->value()) {
- case 1:
- lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
- return;
- case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
- return;
- case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
- return;
- case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
- return;
- default:
- LoadSmiConstant(dst, constant);
- addq(dst, src);
- return;
- }
- }
-}
-
-
-void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
- if (constant->value() != 0) {
- addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
- }
-}
-
-
-void MacroAssembler::SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
-
- LoadSmiConstant(kScratchRegister, constant);
- addq(kScratchRegister, src);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- LoadSmiConstant(dst, constant);
- addq(dst, src);
- j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- Register constant_reg = GetSmiConstant(constant);
- subq(dst, constant_reg);
- } else {
- if (constant->value() == Smi::kMinValue) {
- LoadSmiConstant(dst, constant);
-      // Adding and subtracting the min-value gives the same result; they
-      // differ only in the overflow flag, which we don't check here.
- addq(dst, src);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
- addq(dst, src);
- }
- }
-}
-
-
-void MacroAssembler::SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- if (constant->value() == 0) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test for non-negativity before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result, near_jump);
- LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
- addq(kScratchRegister, dst);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- }
- } else {
- if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test for non-negativity before doing the subtraction.
- testq(src, src);
- j(not_sign, on_not_smi_result, near_jump);
- LoadSmiConstant(dst, constant);
-      // Adding and subtracting the min-value gives the same result; they
-      // differ only in the overflow flag, which we don't check here.
- addq(dst, src);
- } else {
- // Subtract by adding the negation.
- LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
- j(overflow, on_not_smi_result, near_jump);
- }
- }
-}
-
-
-void MacroAssembler::SmiNeg(Register dst,
- Register src,
- Label* on_smi_result,
- Label::Distance near_jump) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- movq(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
- // Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
- j(not_equal, on_smi_result, near_jump);
- movq(src, kScratchRegister);
- } else {
- movq(dst, src);
- neg(dst);
- cmpq(dst, src);
- // If the result is zero or Smi::kMinValue, negation failed to create a smi.
- j(not_equal, on_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- j(overflow, on_not_smi_result, near_jump);
- movq(dst, kScratchRegister);
- } else {
- ASSERT(!src2.AddressUsesRegister(dst));
- movq(dst, src1);
- addq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
- Register src1,
- Register src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible.
- if (!dst.is(src1)) {
- if (emit_debug_code()) {
- movq(kScratchRegister, src1);
- addq(kScratchRegister, src2);
- Check(no_overflow, "Smi addition overflow");
- }
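-    // Since the smi tag is zero, adding two smis is plain word addition; lea
-    // computes src1 + src2 directly into dst.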
- lea(dst, Operand(src1, src2, times_1, 0));
- } else {
- addq(dst, src2);
- Assert(no_overflow, "Smi addition overflow");
- }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
- if (dst.is(src1)) {
- cmpq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- subq(dst, src2);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- subq(dst, src2);
- Assert(no_overflow, "Smi subtraction overflow");
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- if (dst.is(src1)) {
- movq(kScratchRegister, src2);
- cmpq(src1, kScratchRegister);
- j(overflow, on_not_smi_result, near_jump);
- subq(src1, kScratchRegister);
- } else {
- movq(dst, src1);
- subq(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
- Register src1,
- const Operand& src2) {
- // No overflow checking. Use only when it's known that
- // overflowing is impossible (e.g., subtracting two positive smis).
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- subq(dst, src2);
- Assert(no_overflow, "Smi subtraction overflow");
-}
-
-
-void MacroAssembler::SmiMul(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT(!dst.is(src2));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
-
- if (dst.is(src1)) {
- Label failure, zero_correct_result;
- movq(kScratchRegister, src1); // Create backup for later testing.
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, &failure, Label::kNear);
-
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- Label correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result, Label::kNear);
-
- movq(dst, kScratchRegister);
- xor_(dst, src2);
- // Result was positive zero.
- j(positive, &zero_correct_result, Label::kNear);
-
- bind(&failure); // Reused failure exit, restores src1.
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result, near_jump);
-
- bind(&zero_correct_result);
- Set(dst, 0);
-
- bind(&correct_result);
- } else {
- SmiToInteger64(dst, src1);
- imul(dst, src2);
- j(overflow, on_not_smi_result, near_jump);
- // Check for negative zero result. If product is zero, and one
- // argument is negative, go to slow case.
- Label correct_result;
- testq(dst, dst);
- j(not_zero, &correct_result, Label::kNear);
-    // One of src1 and src2 is zero; check whether the other is negative.
- movq(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
- j(negative, on_not_smi_result, near_jump);
- bind(&correct_result);
- }
-}
-
-
-void MacroAssembler::SmiDiv(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
-
- // Check for 0 divisor (result is +/-Infinity).
- testq(src2, src2);
- j(zero, on_not_smi_result, near_jump);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- // We need to rule out dividing Smi::kMinValue by -1, since that would
- // overflow in idiv and raise an exception.
-  // We combine this with the negative zero test (negative zero only happens
-  // when dividing zero by a negative number).
-
- // We overshoot a little and go to slow case if we divide min-value
- // by any negative value, not just -1.
- Label safe_div;
- testl(rax, Immediate(0x7fffffff));
- j(not_zero, &safe_div, Label::kNear);
- testq(src2, src2);
- if (src1.is(rax)) {
- j(positive, &safe_div, Label::kNear);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result, near_jump);
- } else {
- j(negative, on_not_smi_result, near_jump);
- }
- bind(&safe_div);
-
- SmiToInteger32(src2, src2);
-  // Sign extend eax (the untagged src1) into edx:eax.
- cdq();
- idivl(src2);
- Integer32ToSmi(src2, src2);
- // Check that the remainder is zero.
- testl(rdx, rdx);
- if (src1.is(rax)) {
- Label smi_result;
- j(zero, &smi_result, Label::kNear);
- movq(src1, kScratchRegister);
- jmp(on_not_smi_result, near_jump);
- bind(&smi_result);
- } else {
- j(not_zero, on_not_smi_result, near_jump);
- }
- if (!dst.is(src1) && src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- Integer32ToSmi(dst, rax);
-}
-
-
-void MacroAssembler::SmiMod(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
- ASSERT(!src1.is(src2));
-
- testq(src2, src2);
- j(zero, on_not_smi_result, near_jump);
-
- if (src1.is(rax)) {
- movq(kScratchRegister, src1);
- }
- SmiToInteger32(rax, src1);
- SmiToInteger32(src2, src2);
-
- // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
- Label safe_div;
- cmpl(rax, Immediate(Smi::kMinValue));
- j(not_equal, &safe_div, Label::kNear);
- cmpl(src2, Immediate(-1));
- j(not_equal, &safe_div, Label::kNear);
- // Retag inputs and go slow case.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- jmp(on_not_smi_result, near_jump);
- bind(&safe_div);
-
- // Sign extend eax into edx:eax.
- cdq();
- idivl(src2);
- // Restore smi tags on inputs.
- Integer32ToSmi(src2, src2);
- if (src1.is(rax)) {
- movq(src1, kScratchRegister);
- }
- // Check for a negative zero result. If the result is zero, and the
- // dividend is negative, go slow to return a floating point negative zero.
- Label smi_result;
- testl(rdx, rdx);
- j(not_zero, &smi_result, Label::kNear);
- testq(src1, src1);
- j(negative, on_not_smi_result, near_jump);
- bind(&smi_result);
- Integer32ToSmi(dst, rdx);
-}
-
-
-void MacroAssembler::SmiNot(Register dst, Register src) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
- // Set tag and padding bits before negating, so that they are zero afterwards.
- movl(kScratchRegister, Immediate(~0));
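-  // movl zero-extends, so kScratchRegister now holds 0x00000000FFFFFFFF.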
- if (dst.is(src)) {
- xor_(dst, kScratchRegister);
- } else {
- lea(dst, Operand(src, kScratchRegister, times_1, 0));
- }
- not_(dst);
-}
-
-
-void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
- ASSERT(!dst.is(src2));
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- and_(dst, src2);
-}
-
-
-void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
- if (constant->value() == 0) {
- Set(dst, 0);
- } else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- Register constant_reg = GetSmiConstant(constant);
- and_(dst, constant_reg);
- } else {
- LoadSmiConstant(dst, constant);
- and_(dst, src);
- }
-}
-
-
-void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
- if (!dst.is(src1)) {
- ASSERT(!src1.is(src2));
- movq(dst, src1);
- }
- or_(dst, src2);
-}
-
-
-void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- Register constant_reg = GetSmiConstant(constant);
- or_(dst, constant_reg);
- } else {
- LoadSmiConstant(dst, constant);
- or_(dst, src);
- }
-}
-
-
-void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
- if (!dst.is(src1)) {
- ASSERT(!src1.is(src2));
- movq(dst, src1);
- }
- xor_(dst, src2);
-}
-
-
-void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
- if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
- Register constant_reg = GetSmiConstant(constant);
- xor_(dst, constant_reg);
- } else {
- LoadSmiConstant(dst, constant);
- xor_(dst, src);
- }
-}
-
-
-void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
- Register src,
- int shift_value) {
- ASSERT(is_uint5(shift_value));
- if (shift_value > 0) {
- if (dst.is(src)) {
- sar(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
- } else {
- UNIMPLEMENTED(); // Not used.
- }
- }
-}
-
-
-void MacroAssembler::SmiShiftLeftConstant(Register dst,
- Register src,
- int shift_value) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift_value > 0) {
- shl(dst, Immediate(shift_value));
- }
-}
-
-
-void MacroAssembler::SmiShiftLogicalRightConstant(
- Register dst, Register src, int shift_value,
- Label* on_not_smi_result, Label::Distance near_jump) {
-  // Logical right shift interprets its result as an *unsigned* number.
- if (dst.is(src)) {
- UNIMPLEMENTED(); // Not used.
- } else {
- movq(dst, src);
- if (shift_value == 0) {
- testq(dst, dst);
- j(negative, on_not_smi_result, near_jump);
- }
- shr(dst, Immediate(shift_value + kSmiShift));
- shl(dst, Immediate(kSmiShift));
- }
-}
-
-
-void MacroAssembler::SmiShiftLeft(Register dst,
- Register src1,
- Register src2) {
- ASSERT(!dst.is(rcx));
- // Untag shift amount.
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
-  // The shift amount is taken from the lower 5 bits only, not six as the
-  // 64-bit shl opcode would use.
- and_(rcx, Immediate(0x1f));
- shl_cl(dst);
-}
-
-
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- // dst and src1 can be the same, because the one case that bails out
- // is a shift by 0, which leaves dst, and therefore src1, unchanged.
- if (src1.is(rcx) || src2.is(rcx)) {
- movq(kScratchRegister, rcx);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
-  shr_cl(dst);  // Shift amount is (rcx & 0x1f) + 32.
- shl(dst, Immediate(kSmiShift));
- testq(dst, dst);
- if (src1.is(rcx) || src2.is(rcx)) {
- Label positive_result;
- j(positive, &positive_result, Label::kNear);
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else {
- movq(src2, kScratchRegister);
- }
- jmp(on_not_smi_result, near_jump);
- bind(&positive_result);
- } else {
- // src2 was zero and src1 negative.
- j(negative, on_not_smi_result, near_jump);
- }
-}
-
-
-void MacroAssembler::SmiShiftArithmeticRight(Register dst,
- Register src1,
- Register src2) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- if (src1.is(rcx)) {
- movq(kScratchRegister, src1);
- } else if (src2.is(rcx)) {
- movq(kScratchRegister, src2);
- }
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
-  sar_cl(dst);  // Shift amount is (original rcx & 0x1f) + 32.
- shl(dst, Immediate(kSmiShift));
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else if (src2.is(rcx)) {
- movq(src2, kScratchRegister);
- }
-}
-
-
-void MacroAssembler::SelectNonSmi(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smis,
- Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(src1));
- ASSERT(!dst.is(src2));
-  // The operands must not both be smis (at least one must be a heap object).
-#ifdef DEBUG
- if (allow_stub_calls()) { // Check contains a stub call.
- Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
- Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
- }
-#endif
- STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
- movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
- testl(kScratchRegister, src2);
-  // If non-zero then both are heap objects, i.e. neither is a smi.
- j(not_zero, on_not_smis, near_jump);
-
- // Exactly one operand is a smi.
- ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
-  // kScratchRegister still holds src1 & kSmiTagMask, which is zero or one.
- subq(kScratchRegister, Immediate(1));
-  // If src1 is a smi, then the scratch register is all 1s, else it is all 0s.
- movq(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
- // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
- // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
-}
-
-
-SmiIndex MacroAssembler::SmiToIndex(Register dst,
- Register src,
- int shift) {
- ASSERT(is_uint6(shift));
- // There is a possible optimization if shift is in the range 60-63, but that
- // will (and must) never happen.
- if (!dst.is(src)) {
- movq(dst, src);
- }
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
- } else {
- shl(dst, Immediate(shift - kSmiShift));
- }
- return SmiIndex(dst, times_1);
-}
-
-SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
- Register src,
- int shift) {
- // Register src holds a positive smi.
- ASSERT(is_uint6(shift));
- if (!dst.is(src)) {
- movq(dst, src);
- }
- neg(dst);
- if (shift < kSmiShift) {
- sar(dst, Immediate(kSmiShift - shift));
- } else {
- shl(dst, Immediate(shift - kSmiShift));
- }
- return SmiIndex(dst, times_1);
-}
-
-
-void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
- ASSERT_EQ(0, kSmiShift % kBitsPerByte);
- addl(dst, Operand(src, kSmiShift / kBitsPerByte));
-}
-
-
-void MacroAssembler::JumpIfNotString(Register object,
- Register object_map,
- Label* not_string,
- Label::Distance near_jump) {
- Condition is_smi = CheckSmi(object);
- j(is_smi, not_string, near_jump);
- CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
- j(above_equal, not_string, near_jump);
-}
-
-
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
- Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- Label* on_fail,
- Label::Distance near_jump) {
- // Check that both objects are not smis.
- Condition either_smi = CheckEitherSmi(first_object, second_object);
- j(either_smi, on_fail, near_jump);
-
- // Load instance type for both strings.
- movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
- movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
- movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
-
- // Check that both are flat ASCII strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail, near_jump);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
- Label* failure,
- Label::Distance near_jump) {
- if (!scratch.is(instance_type)) {
- movl(scratch, instance_type);
- }
-
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
- andl(scratch, Immediate(kFlatAsciiStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
- j(not_equal, failure, near_jump);
-}
-
-
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* on_fail,
- Label::Distance near_jump) {
- // Load instance type for both strings.
- movq(scratch1, first_object_instance_type);
- movq(scratch2, second_object_instance_type);
-
- // Check that both are flat ASCII strings.
- ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
- const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-
- andl(scratch1, Immediate(kFlatAsciiStringMask));
- andl(scratch2, Immediate(kFlatAsciiStringMask));
- // Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
- cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
- j(not_equal, on_fail, near_jump);
-}
-
-
-
-void MacroAssembler::Move(Register dst, Register src) {
- if (!dst.is(src)) {
- movq(dst, src);
- }
-}
-
-
-void MacroAssembler::Move(Register dst, Handle<Object> source) {
- ASSERT(!source->IsFailure());
- if (source->IsSmi()) {
- Move(dst, Smi::cast(*source));
- } else {
- movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
- }
-}
-
-
-void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
- ASSERT(!source->IsFailure());
- if (source->IsSmi()) {
- Move(dst, Smi::cast(*source));
- } else {
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
- movq(dst, kScratchRegister);
- }
-}
-
-
-void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
- if (source->IsSmi()) {
- Cmp(dst, Smi::cast(*source));
- } else {
- Move(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
- }
-}
-
-
-void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
- if (source->IsSmi()) {
- Cmp(dst, Smi::cast(*source));
- } else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
- cmpq(dst, kScratchRegister);
- }
-}
-
-
-void MacroAssembler::Push(Handle<Object> source) {
- if (source->IsSmi()) {
- Push(Smi::cast(*source));
- } else {
- ASSERT(source->IsHeapObject());
- movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
- }
-}
-
-
-void MacroAssembler::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- movq(result, Operand(result, 0));
- } else {
- Move(result, object);
- }
-}
-
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- movq(kScratchRegister, Operand(kScratchRegister, 0));
- push(kScratchRegister);
- } else {
- Push(object);
- }
-}
-
-
-void MacroAssembler::LoadGlobalCell(Register dst,
- Handle<JSGlobalPropertyCell> cell) {
- if (dst.is(rax)) {
- load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
- } else {
- movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- movq(dst, Operand(dst, 0));
- }
-}
-
-
-void MacroAssembler::Push(Smi* source) {
- intptr_t smi = reinterpret_cast<intptr_t>(source);
- if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
- } else {
- Register constant = GetSmiConstant(source);
- push(constant);
- }
-}
-
-
-void MacroAssembler::Drop(int stack_elements) {
- if (stack_elements > 0) {
- addq(rsp, Immediate(stack_elements * kPointerSize));
- }
-}
-
-
-void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
-}
-
-
-void MacroAssembler::TestBit(const Operand& src, int bits) {
- int byte_offset = bits / kBitsPerByte;
- int bit_in_byte = bits & (kBitsPerByte - 1);
- testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
-}
-
-
-void MacroAssembler::Jump(ExternalReference ext) {
- LoadAddress(kScratchRegister, ext);
- jmp(kScratchRegister);
-}
-
-
-void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
- movq(kScratchRegister, destination, rmode);
- jmp(kScratchRegister);
-}
-
-
-void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
- // TODO(X64): Inline this
- jmp(code_object, rmode);
-}
-
-
-int MacroAssembler::CallSize(ExternalReference ext) {
-  // Opcode for call kScratchRegister (r10) is: Rex.B FF D2 (three bytes).
- const int kCallInstructionSize = 3;
- return LoadAddressSize(ext) + kCallInstructionSize;
-}
-
-
-void MacroAssembler::Call(ExternalReference ext) {
-#ifdef DEBUG
- int end_position = pc_offset() + CallSize(ext);
-#endif
- LoadAddress(kScratchRegister, ext);
- call(kScratchRegister);
-#ifdef DEBUG
- CHECK_EQ(end_position, pc_offset());
-#endif
-}
-
-
-void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
-#ifdef DEBUG
- int end_position = pc_offset() + CallSize(destination, rmode);
-#endif
- movq(kScratchRegister, destination, rmode);
- call(kScratchRegister);
-#ifdef DEBUG
- CHECK_EQ(pc_offset(), end_position);
-#endif
-}
-
-
-void MacroAssembler::Call(Handle<Code> code_object,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id) {
-#ifdef DEBUG
- int end_position = pc_offset() + CallSize(code_object);
-#endif
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- call(code_object, rmode, ast_id);
-#ifdef DEBUG
- CHECK_EQ(end_position, pc_offset());
-#endif
-}
-
-
-void MacroAssembler::Pushad() {
- push(rax);
- push(rcx);
- push(rdx);
- push(rbx);
- // Not pushing rsp or rbp.
- push(rsi);
- push(rdi);
- push(r8);
- push(r9);
- // r10 is kScratchRegister.
- push(r11);
- // r12 is kSmiConstantRegister.
- // r13 is kRootRegister.
- push(r14);
- push(r15);
- STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
- // Use lea for symmetry with Popad.
- int sp_delta =
- (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, -sp_delta));
-}
-
-
-void MacroAssembler::Popad() {
- // Popad must not change the flags, so use lea instead of addq.
- int sp_delta =
- (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, sp_delta));
- pop(r15);
- pop(r14);
- pop(r11);
- pop(r9);
- pop(r8);
- pop(rdi);
- pop(rsi);
- pop(rbx);
- pop(rdx);
- pop(rcx);
- pop(rax);
-}
-
-
-void MacroAssembler::Dropad() {
- addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
-}
-
-
-// Order in which general registers are pushed by Pushad:
-// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
-const int
-MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
- 0,
- 1,
- 2,
- 3,
- -1,
- -1,
- 4,
- 5,
- 6,
- 7,
- -1,
- 8,
- -1,
- -1,
- 9,
- 10
-};
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
- const Immediate& imm) {
- movq(SafepointRegisterSlot(dst), imm);
-}
-
-
-void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
- movq(SafepointRegisterSlot(dst), src);
-}
-
-
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- movq(dst, SafepointRegisterSlot(src));
-}
-
-
-Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
- return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
-}
-
-
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // We will build up the handler from the bottom by pushing on the stack.
- // First push the frame pointer and context.
- if (kind == StackHandler::JS_ENTRY) {
- // The frame pointer does not point to a JS frame so we save NULL for
- // rbp. We expect the code throwing an exception to check rbp before
- // dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
- Push(Smi::FromInt(0)); // No context.
- } else {
- push(rbp);
- push(rsi);
- }
-
- // Push the state and the code object.
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- push(Immediate(state));
- Push(CodeObject());
-
- // Link the current handler as the next handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- push(ExternalOperand(handler_address));
- // Set this new handler as the current one.
- movq(ExternalOperand(handler_address), rsp);
-}
-
-
-void MacroAssembler::PopTryHandler() {
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- pop(ExternalOperand(handler_address));
- addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-}
-
-
-void MacroAssembler::JumpToHandlerEntry() {
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- // rax = exception, rdi = code object, rdx = state.
- movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
- shr(rdx, Immediate(StackHandler::kKindWidth));
- movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
- SmiToInteger64(rdx, rdx);
- lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
- jmp(rdi);
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in rax.
- if (!value.is(rax)) {
- movq(rax, value);
- }
- // Drop the stack pointer to the top of the top handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- movq(rsp, ExternalOperand(handler_address));
- // Restore the next handler.
- pop(ExternalOperand(handler_address));
-
- // Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
-
- // Restore the context and frame pointer.
- pop(rsi); // Context.
- pop(rbp); // Frame pointer.
-
- // If the handler is a JS frame, restore the context to the frame.
-  // (kind == JS_ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
- // rbp or rsi.
- Label skip;
- testq(rsi, rsi);
- j(zero, &skip, Label::kNear);
- movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
- bind(&skip);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in rax.
- if (!value.is(rax)) {
- movq(rax, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- Load(rsp, handler_address);
-
- // Unwind the handlers until the top ENTRY handler is found.
- Label fetch_next, check_kind;
- jmp(&check_kind, Label::kNear);
- bind(&fetch_next);
- movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- testl(Operand(rsp, StackHandlerConstants::kStateOffset),
- Immediate(StackHandler::KindField::kMask));
- j(not_zero, &fetch_next);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- pop(ExternalOperand(handler_address));
-
- // Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
-
- // Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(rsi);
- pop(rbp);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::Ret() {
- ret(0);
-}
-
-
-void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
- if (is_uint16(bytes_dropped)) {
- ret(bytes_dropped);
- } else {
- pop(scratch);
- addq(rsp, Immediate(bytes_dropped));
- push(scratch);
- ret(0);
- }
-}
-
-
-void MacroAssembler::FCmp() {
- fucomip();
- fstp(0);
-}
-
-
-void MacroAssembler::CmpObjectType(Register heap_object,
- InstanceType type,
- Register map) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- CmpInstanceType(map, type);
-}
-
-
-void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
- cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(static_cast<int8_t>(type)));
-}
-
-
-void MacroAssembler::CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleyElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
- j(below_equal, fail, distance);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleyElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
- Register maybe_number,
- Register elements,
- Register index,
- XMMRegister xmm_scratch,
- Label* fail,
- int elements_offset) {
- Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
-
- JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
- CheckMap(maybe_number,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- // Double value, canonicalize NaN.
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
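-  // offset points at the upper 32 bits of the little-endian double value.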
- cmpl(FieldOperand(maybe_number, offset),
- Immediate(kNaNOrInfinityLowerBoundUpper32));
- j(greater_equal, &maybe_nan, Label::kNear);
-
- bind(&not_nan);
- movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
- bind(&have_double_value);
- movsd(FieldOperand(elements, index, times_8,
- FixedDoubleArray::kHeaderSize - elements_offset),
- xmm_scratch);
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- j(greater, &is_nan, Label::kNear);
- cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
- j(zero, &not_nan);
- bind(&is_nan);
- // Convert all NaNs to the same canonical NaN value when they are stored in
- // the double array.
- Set(kScratchRegister, BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
- movq(xmm_scratch, kScratchRegister);
- jmp(&have_double_value, Label::kNear);
-
- bind(&smi_value);
-  // Value is a smi. Convert to a double and store.
- // Preserve original value.
- SmiToInteger32(kScratchRegister, maybe_number);
- cvtlsi2sd(xmm_scratch, kScratchRegister);
- movsd(FieldOperand(elements, index, times_8,
- FixedDoubleArray::kHeaderSize - elements_offset),
- xmm_scratch);
- bind(&done);
-}
-
-
-void MacroAssembler::CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode) {
- Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
- if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
- ElementsKind kind = map->elements_kind();
- if (IsFastElementsKind(kind)) {
- bool packed = IsFastPackedElementsKind(kind);
- Map* current_map = *map;
- while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
- kind = GetNextMoreGeneralFastElementsKind(kind, packed);
- current_map = current_map->LookupElementsTransitionMap(kind);
- if (!current_map) break;
- j(equal, early_success, Label::kNear);
- Cmp(FieldOperand(obj, HeapObject::kMapOffset),
- Handle<Map>(current_map));
- }
- }
- }
-}
-
-
-void MacroAssembler::CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode) {
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, fail);
- }
-
- Label success;
- CompareMap(obj, map, &success, mode);
- j(not_equal, fail);
- bind(&success);
-}
-
-
-void MacroAssembler::ClampUint8(Register reg) {
- Label done;
- testl(reg, Immediate(0xFFFFFF00));
- j(zero, &done, Label::kNear);
- setcc(negative, reg); // 1 if negative, 0 if positive.
- decb(reg); // 0 if negative, 255 if positive.
- bind(&done);
-}
-
-
-void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
- XMMRegister temp_xmm_reg,
- Register result_reg) {
- Label done;
- Label conv_failure;
- xorps(temp_xmm_reg, temp_xmm_reg);
- cvtsd2si(result_reg, input_reg);
- testl(result_reg, Immediate(0xFFFFFF00));
- j(zero, &done, Label::kNear);
- cmpl(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
- movl(result_reg, Immediate(0));
- setcc(above, result_reg);
- subl(result_reg, Immediate(1));
- andl(result_reg, Immediate(255));
- jmp(&done, Label::kNear);
- bind(&conv_failure);
- Set(result_reg, 0);
- ucomisd(input_reg, temp_xmm_reg);
- j(below, &done, Label::kNear);
- Set(result_reg, 255);
- bind(&done);
-}
-
-
-void MacroAssembler::LoadUint32(XMMRegister dst,
- Register src,
- XMMRegister scratch) {
- if (FLAG_debug_code) {
- cmpq(src, Immediate(0xffffffff));
- Assert(below_equal, "input GPR is expected to have upper32 cleared");
- }
- cvtqsi2sd(dst, src);
-}
-
-
-void MacroAssembler::LoadInstanceDescriptors(Register map,
- Register descriptors) {
- movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
-}
-
-
-void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- movq(dst, FieldOperand(map, Map::kBitField3Offset));
- DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
-}
-
-
-void MacroAssembler::EnumLength(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- movq(dst, FieldOperand(map, Map::kBitField3Offset));
- Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
- and_(dst, kScratchRegister);
-}
-
-
-void MacroAssembler::DispatchMap(Register obj,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type) {
- Label fail;
- if (smi_check_type == DO_SMI_CHECK) {
- JumpIfSmi(obj, &fail);
- }
- Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
- j(equal, success, RelocInfo::CODE_TARGET);
-
- bind(&fail);
-}
-
-
-void MacroAssembler::AssertNumber(Register object) {
- if (emit_debug_code()) {
- Label ok;
- Condition is_smi = CheckSmi(object);
- j(is_smi, &ok, Label::kNear);
- Cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Check(equal, "Operand is not a number");
- bind(&ok);
- }
-}
-
-
-void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
- Condition is_smi = CheckSmi(object);
- Check(NegateCondition(is_smi), "Operand is a smi");
- }
-}
-
-
-void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
- Condition is_smi = CheckSmi(object);
- Check(is_smi, "Operand is not a smi");
- }
-}
-
-
-void MacroAssembler::AssertSmi(const Operand& object) {
- if (emit_debug_code()) {
- Condition is_smi = CheckSmi(object);
- Check(is_smi, "Operand is not a smi");
- }
-}
-
-
-void MacroAssembler::AssertZeroExtended(Register int32_register) {
- if (emit_debug_code()) {
- ASSERT(!int32_register.is(kScratchRegister));
- movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
- cmpq(kScratchRegister, int32_register);
- Check(above_equal, "32 bit value in register is not zero-extended");
- }
-}
-
-
-void MacroAssembler::AssertString(Register object) {
- if (emit_debug_code()) {
- testb(object, Immediate(kSmiTagMask));
- Check(not_equal, "Operand is a smi and not a string");
- push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
- CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
- Check(below, "Operand is not a string");
- }
-}
-
-
-void MacroAssembler::AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- if (emit_debug_code()) {
- ASSERT(!src.is(kScratchRegister));
- LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
- Check(equal, message);
- }
-}
-
-
-Condition MacroAssembler::IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type) {
- movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- testb(instance_type, Immediate(kIsNotStringMask));
- return zero;
-}
-
-
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Label* miss,
- bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- testl(function, Immediate(kSmiTagMask));
- j(zero, miss);
-
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss);
-
- if (miss_on_bound_function) {
- movq(kScratchRegister,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
- // field).
- TestBit(FieldOperand(kScratchRegister,
- SharedFunctionInfo::kCompilerHintsOffset),
- SharedFunctionInfo::kBoundFunction);
- j(not_zero, miss);
- }
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- testb(FieldOperand(result, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance, Label::kNear);
-
- // Get the prototype or initial map from the function.
- movq(result,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- CompareRoot(result, Heap::kTheHoleValueRootIndex);
- j(equal, miss);
-
- // If the function does not have an initial map, we're done.
- Label done;
- CmpObjectType(result, MAP_TYPE, kScratchRegister);
- j(not_equal, &done, Label::kNear);
-
- // Get the prototype from the initial map.
- movq(result, FieldOperand(result, Map::kPrototypeOffset));
- jmp(&done, Label::kNear);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- movq(result, FieldOperand(result, Map::kConstructorOffset));
-
- // All done.
- bind(&done);
-}
-
-
-void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand counter_operand = ExternalOperand(ExternalReference(counter));
- movl(counter_operand, Immediate(value));
- }
-}
-
-
-void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand counter_operand = ExternalOperand(ExternalReference(counter));
- if (value == 1) {
- incl(counter_operand);
- } else {
- addl(counter_operand, Immediate(value));
- }
- }
-}
-
-
-void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
- if (FLAG_native_code_counters && counter->Enabled()) {
- Operand counter_operand = ExternalOperand(ExternalReference(counter));
- if (value == 1) {
- decl(counter_operand);
- } else {
- subl(counter_operand, Immediate(value));
- }
- }
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::DebugBreak() {
- Set(rax, 0); // No arguments.
- LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
- CEntryStub ces(1);
- ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
-}
-#endif // ENABLE_DEBUGGER_SUPPORT
-
-
-void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
- // This macro takes the dst register to make the code more readable
- // at the call sites. However, the dst register has to be rcx to
- // follow the calling convention which requires the call type to be
- // in rcx.
- ASSERT(dst.is(rcx));
- if (call_kind == CALL_AS_FUNCTION) {
- LoadSmiConstant(dst, Smi::FromInt(1));
- } else {
- LoadSmiConstant(dst, Smi::FromInt(0));
- }
-}
-
-
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- InvokePrologue(expected,
- actual,
- Handle<Code>::null(),
- code,
- &done,
- &definitely_mismatches,
- flag,
- Label::kNear,
- call_wrapper,
- call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
- call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
- jmp(code);
- }
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- Label done;
- bool definitely_mismatches = false;
- Register dummy = rax;
- InvokePrologue(expected,
- actual,
- code,
- dummy,
- &done,
- &definitely_mismatches,
- flag,
- Label::kNear,
- call_wrapper,
- call_kind);
- if (!definitely_mismatches) {
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
- Call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
- Jump(code, rmode);
- }
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- ASSERT(function.is(rdi));
- movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
- movsxlq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
- // Advances rdx to the end of the Code object header, to the start of
- // the executable code.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-
- ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- // You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
-
- // Get the function and setup the context.
- LoadHeapObject(rdi, function);
- movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
-}
-
-
-void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- Label::Distance near_jump,
- const CallWrapper& call_wrapper,
- CallKind call_kind) {
- bool definitely_matches = false;
- *definitely_mismatches = false;
- Label invoke;
- if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
- if (expected.immediate() == actual.immediate()) {
- definitely_matches = true;
- } else {
- Set(rax, actual.immediate());
- if (expected.immediate() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
- // Don't worry about adapting arguments for built-ins that
- // don't want that done. Skip adaptation code by making it look
- // like we have a match between expected and actual number of
- // arguments.
- definitely_matches = true;
- } else {
- *definitely_mismatches = true;
- Set(rbx, expected.immediate());
- }
- }
- } else {
- if (actual.is_immediate()) {
- // Expected is in register, actual is immediate. This is the
- // case when we invoke function values without going through the
- // IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
- j(equal, &invoke, Label::kNear);
- ASSERT(expected.reg().is(rbx));
- Set(rax, actual.immediate());
- } else if (!expected.reg().is(actual.reg())) {
- // Both expected and actual are in (different) registers. This
- // is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
- j(equal, &invoke, Label::kNear);
- ASSERT(actual.reg().is(rax));
- ASSERT(expected.reg().is(rbx));
- }
- }
-
- if (!definitely_matches) {
- Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_register.is(rdx)) {
- movq(rdx, code_register);
- }
-
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor));
- SetCallKind(rcx, call_kind);
- Call(adaptor, RelocInfo::CODE_TARGET);
- call_wrapper.AfterCall();
- if (!*definitely_mismatches) {
- jmp(done, near_jump);
- }
- } else {
- SetCallKind(rcx, call_kind);
- Jump(adaptor, RelocInfo::CODE_TARGET);
- }
- bind(&invoke);
- }
-}
-
-
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(rbp);
- movq(rbp, rsp);
- push(rsi); // Context.
- Push(Smi::FromInt(type));
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
- if (emit_debug_code()) {
- movq(kScratchRegister,
- isolate()->factory()->undefined_value(),
- RelocInfo::EMBEDDED_OBJECT);
- cmpq(Operand(rsp, 0), kScratchRegister);
- Check(not_equal, "code object not properly patched");
- }
-}
-
-
-void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- if (emit_debug_code()) {
- Move(kScratchRegister, Smi::FromInt(type));
- cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
- Check(equal, "stack frame types must match");
- }
- movq(rsp, rbp);
- pop(rbp);
-}
-
-
-void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
- // Set up the frame structure on the stack.
- // All constants are relative to the frame pointer of the exit frame.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(rbp);
- movq(rbp, rsp);
-
- // Reserve room for entry stack pointer and push the code object.
- ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
- movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister); // Accessed from ExitFrame::code_slot.
-
- // Save the frame pointer and the context in top.
- if (save_rax) {
- movq(r14, rax); // Backup rax in callee-save register.
- }
-
- Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
- Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
-}
-
-
-void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
- bool save_doubles) {
-#ifdef _WIN64
- const int kShadowSpace = 4;
- arg_stack_space += kShadowSpace;
-#endif
- // Optionally save all XMM registers.
- if (save_doubles) {
- int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
- arg_stack_space * kPointerSize;
- subq(rsp, Immediate(space));
- int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
- XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
- }
- } else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kPointerSize));
- }
-
- // Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
- if (kFrameAlignment > 0) {
- ASSERT(IsPowerOf2(kFrameAlignment));
- ASSERT(is_int8(kFrameAlignment));
- and_(rsp, Immediate(-kFrameAlignment));
- }
-
- // Patch the saved entry sp.
- movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
-}
-
-
-void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
- EnterExitFramePrologue(true);
-
- // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
- // so it must be retained across the C-call.
- int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
-
- EnterExitFrameEpilogue(arg_stack_space, save_doubles);
-}
-
-
-void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
- EnterExitFramePrologue(false);
- EnterExitFrameEpilogue(arg_stack_space, false);
-}
-
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
- // Registers:
- // r15 : argv
- if (save_doubles) {
- int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
- XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
- }
- }
- // Get the return address from the stack and restore the frame pointer.
- movq(rcx, Operand(rbp, 1 * kPointerSize));
- movq(rbp, Operand(rbp, 0 * kPointerSize));
-
- // Drop everything up to and including the arguments and the receiver
- // from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
-
- // Push the return address to get ready to return.
- push(rcx);
-
- LeaveExitFrameEpilogue();
-}
-
-
-void MacroAssembler::LeaveApiExitFrame() {
- movq(rsp, rbp);
- pop(rbp);
-
- LeaveExitFrameEpilogue();
-}
-
-
-void MacroAssembler::LeaveExitFrameEpilogue() {
- // Restore current context from top and clear it in debug mode.
- ExternalReference context_address(Isolate::kContextAddress, isolate());
- Operand context_operand = ExternalOperand(context_address);
- movq(rsi, context_operand);
-#ifdef DEBUG
- movq(context_operand, Immediate(0));
-#endif
-
- // Clear the top frame.
- ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
- isolate());
- Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
- movq(c_entry_fp_operand, Immediate(0));
-}
-
-
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!scratch.is(kScratchRegister));
- // Load current lexical context from the stack frame.
- movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- // When generating debug code, make sure the lexical context is set.
- if (emit_debug_code()) {
- cmpq(scratch, Immediate(0));
- Check(not_equal, "we should not have an empty lexical context");
- }
- // Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, offset));
- movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
- isolate()->factory()->native_context_map());
- Check(equal, "JSGlobalObject::native_context should be a native context.");
- }
-
- // Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- j(equal, &same_contexts);
-
- // Compare security tokens.
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Preserve original value of holder_reg.
- push(holder_reg);
- movq(holder_reg,
- FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- CompareRoot(holder_reg, Heap::kNullValueRootIndex);
- Check(not_equal, "JSGlobalProxy::context() should not be null.");
-
- // Read the first word and compare it to native_context_map().
- movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
- Check(equal, "JSGlobalObject::native_context should be a native context.");
- pop(holder_reg);
- }
-
- movq(kScratchRegister,
- FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- int token_offset =
- Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
- movq(scratch, FieldOperand(scratch, token_offset));
- cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
- j(not_equal, miss);
-
- bind(&same_contexts);
-}
-
-
-void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
- // First of all we assign the hash seed to scratch.
- LoadRoot(scratch, Heap::kHashSeedRootIndex);
- SmiToInteger32(scratch, scratch);
-
- // Xor original key with a seed.
- xorl(r0, scratch);
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- movl(scratch, r0);
- notl(r0);
- shll(scratch, Immediate(15));
- addl(r0, scratch);
- // hash = hash ^ (hash >> 12);
- movl(scratch, r0);
- shrl(scratch, Immediate(12));
- xorl(r0, scratch);
- // hash = hash + (hash << 2);
- leal(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- movl(scratch, r0);
- shrl(scratch, Immediate(4));
- xorl(r0, scratch);
- // hash = hash * 2057;
- imull(r0, r0, Immediate(2057));
- // hash = hash ^ (hash >> 16);
- movl(scratch, r0);
- shrl(scratch, Immediate(16));
- xorl(r0, scratch);
-}
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
-
- Label done;
-
- GetNumberHash(r0, r1);
-
- // Compute capacity mask.
- SmiToInteger32(r1, FieldOperand(elements,
- SeededNumberDictionary::kCapacityOffset));
- decl(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- movq(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(r2, r1);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- cmpq(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- SeededNumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
- j(equal, &done);
- } else {
- j(not_equal, miss);
- }
- }
-
- bind(&done);
- // Check that the value is a normal property.
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
- Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Smi::FromInt(PropertyDetails::TypeField::kMask));
- j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
-void MacroAssembler::LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Just return if allocation top is already known.
- if ((flags & RESULT_CONTAINS_TOP) != 0) {
- // No use of scratch if allocation top is provided.
- ASSERT(!scratch.is_valid());
-#ifdef DEBUG
- // Assert that result actually contains top on entry.
- Operand top_operand = ExternalOperand(new_space_allocation_top);
- cmpq(result, top_operand);
- Check(equal, "Unexpected allocation top");
-#endif
- return;
- }
-
- // Move address of new object to result. Use scratch register if available,
- // and keep address in scratch until call to UpdateAllocationTopHelper.
- if (scratch.is_valid()) {
- LoadAddress(scratch, new_space_allocation_top);
- movq(result, Operand(scratch, 0));
- } else {
- Load(result, new_space_allocation_top);
- }
-}
-
-
-void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch) {
- if (emit_debug_code()) {
- testq(result_end, Immediate(kObjectAlignmentMask));
- Check(zero, "Unaligned allocation in new space");
- }
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Update new top.
- if (scratch.is_valid()) {
- // Scratch already contains address of allocation top.
- movq(Operand(scratch, 0), result_end);
- } else {
- Store(new_space_allocation_top, result_end);
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- if (result_end.is_valid()) {
- movl(result_end, Immediate(0x7191));
- }
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, "Allocation is not double aligned");
- }
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- Register top_reg = result_end.is_valid() ? result_end : result;
-
- if (!top_reg.is(result)) {
- movq(top_reg, result);
- }
- addq(top_reg, Immediate(object_size));
- j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(top_reg, limit_operand);
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch);
-
- bool tag_result = (flags & TAG_OBJECT) != 0;
- if (top_reg.is(result)) {
- if (tag_result) {
- subq(result, Immediate(object_size - kHeapObjectTag));
- } else {
- subq(result, Immediate(object_size));
- }
- } else if (tag_result) {
- // Tag the result if requested.
- ASSERT(kHeapObjectTag == 1);
- incq(result);
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT((flags & SIZE_IN_WORDS) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- movl(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- // Register element_count is not modified by the function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, "Allocation is not double aligned");
- }
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- // We assume that element_count*element_size + header_size does not
- // overflow.
- lea(result_end, Operand(element_count, element_size, header_size));
- addq(result_end, result);
- j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(result_end, limit_operand);
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
- // Tag the result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- ASSERT(kHeapObjectTag == 1);
- incq(result);
- }
-}
-
-
-void MacroAssembler::AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- if (!FLAG_inline_new) {
- if (emit_debug_code()) {
- // Trash the registers to simulate an allocation failure.
- movl(result, Immediate(0x7091));
- movl(result_end, Immediate(0x7191));
- if (scratch.is_valid()) {
- movl(scratch, Immediate(0x7291));
- }
- // object_size is left unchanged by this function.
- }
- jmp(gc_required);
- return;
- }
- ASSERT(!result.is(result_end));
-
- // Load address of new object into result.
- LoadAllocationTopHelper(result, scratch, flags);
-
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
- if (!object_size.is(result_end)) {
- movq(result_end, object_size);
- }
- addq(result_end, result);
- j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
- cmpq(result_end, limit_operand);
- j(above, gc_required);
-
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
- // Align the next allocation. Storing the filler map without checking top is
- // always safe because the limit of the heap is always aligned.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, "Allocation is not double aligned");
- }
-
- // Tag the result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
- }
-}
-
-
-void MacroAssembler::UndoAllocationInNewSpace(Register object) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- // Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
- Operand top_operand = ExternalOperand(new_space_allocation_top);
-#ifdef DEBUG
- cmpq(object, top_operand);
- Check(below, "Undo allocation of non allocated memory");
-#endif
- movq(top_operand, object);
-}
-
-
-void MacroAssembler::AllocateHeapNumber(Register result,
- Register scratch,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch,
- no_reg,
- gc_required,
- TAG_OBJECT);
-
- // Set the map.
- LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
- kObjectAlignmentMask;
- ASSERT(kShortSize == 2);
- // scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
- kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
- if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
- }
-
- // Allocate two byte string in new space.
- AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
- kObjectAlignmentMask;
- movl(scratch1, length);
- ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
- if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
- }
-
- // Allocate ASCII string in new space.
- AllocateInNewSpace(SeqOneByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
-
- // Set the map, length and hash field.
- LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- Integer32ToSmi(scratch1, length);
- movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movq(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
- movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-// Copy memory, byte-by-byte, from source to destination. Not optimized for
-// long or aligned copies. The contents of scratch and length are destroyed.
- // Destination is incremented by length; source, length and scratch are
- // clobbered.
- // A simpler loop is faster on small copies, but slower on large ones.
- // The cld() instruction must have been emitted, to set the direction flag,
- // before calling this function.
-void MacroAssembler::CopyBytes(Register destination,
- Register source,
- Register length,
- int min_length,
- Register scratch) {
- ASSERT(min_length >= 0);
- if (emit_debug_code()) {
- cmpl(length, Immediate(min_length));
- Assert(greater_equal, "Invalid min_length");
- }
- Label loop, done, short_string, short_loop;
-
- const int kLongStringLimit = 20;
- if (min_length <= kLongStringLimit) {
- cmpl(length, Immediate(kLongStringLimit));
- j(less_equal, &short_string);
- }
-
- ASSERT(source.is(rsi));
- ASSERT(destination.is(rdi));
- ASSERT(length.is(rcx));
-
- // Because source is 8-byte aligned in our uses of this function,
- // we keep source aligned for the rep movs operation by copying the odd bytes
- // at the end of the ranges.
- movq(scratch, length);
- shrl(length, Immediate(3));
- repmovsq();
- // Move remaining bytes of length.
- andl(scratch, Immediate(0x7));
- movq(length, Operand(source, scratch, times_1, -8));
- movq(Operand(destination, scratch, times_1, -8), length);
- addq(destination, scratch);
-
- if (min_length <= kLongStringLimit) {
- jmp(&done);
-
- bind(&short_string);
- if (min_length == 0) {
- testl(length, length);
- j(zero, &done);
- }
- lea(scratch, Operand(destination, length, times_1, 0));
-
- bind(&short_loop);
- movb(length, Operand(source, 0));
- movb(Operand(destination, 0), length);
- incq(source);
- incq(destination);
- cmpq(destination, scratch);
- j(not_equal, &short_loop);
-
- bind(&done);
- }
-}
-
-
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler) {
- Label loop, entry;
- jmp(&entry);
- bind(&loop);
- movq(Operand(start_offset, 0), filler);
- addq(start_offset, Immediate(kPointerSize));
- bind(&entry);
- cmpq(start_offset, end_offset);
- j(less, &loop);
-}
-
-
-void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
- if (context_chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- for (int i = 1; i < context_chain_length; i++) {
- movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
- }
- } else {
- // Slot is in the current function context. Move it into the
- // destination register in case we store into it (the write barrier
- // cannot be allowed to destroy the context in rsi).
- movq(dst, rsi);
- }
-
- // We should not have found a with context by walking the context
- // chain (i.e., the static scope chain and runtime context chain do
- // not agree). A variable occurring in such a scope should have
- // slot type LOOKUP and not CONTEXT.
- if (emit_debug_code()) {
- CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
- Heap::kWithContextMapRootIndex);
- Check(not_equal, "Variable resolved to with context.");
- }
-}
-
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- // Load the global or builtins object from the current context.
- movq(scratch,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-
- // Check that the function's map is the same as the expected cached map.
- movq(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- int offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmpq(map_in_out, FieldOperand(scratch, offset));
- j(not_equal, no_map_match);
-
- // Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- movq(map_in_out, FieldOperand(scratch, offset));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- movq(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-#ifdef _WIN64
-static const int kRegisterPassedArguments = 4;
-#else
-static const int kRegisterPassedArguments = 6;
-#endif
-
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- movq(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- movq(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- movq(function, Operand(function, Context::SlotOffset(index)));
-}
-
-
-void MacroAssembler::LoadArrayFunction(Register function) {
- movq(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- movq(function,
- Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
-void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
- Register map) {
- // Load the initial map. The global functions all have initial maps.
- movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- if (emit_debug_code()) {
- Label ok, fail;
- CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
- jmp(&ok);
- bind(&fail);
- Abort("Global functions must have initial map");
- bind(&ok);
- }
-}
-
-
-int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
- // On Windows 64, stack slots are reserved by the caller for all arguments,
- // including the ones passed in registers, and space is always allocated for
- // the four register arguments even if the function takes fewer than four
- // arguments.
- // On the AMD64 ABI (Linux/Mac), the first six arguments are passed in
- // registers and the caller does not reserve stack slots for them.
- ASSERT(num_arguments >= 0);
-#ifdef _WIN64
- const int kMinimumStackSlots = kRegisterPassedArguments;
- if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
- return num_arguments;
-#else
- if (num_arguments < kRegisterPassedArguments) return 0;
- return num_arguments - kRegisterPassedArguments;
-#endif
-}
-
-
-void MacroAssembler::PrepareCallCFunction(int num_arguments) {
- int frame_alignment = OS::ActivationFrameAlignment();
- ASSERT(frame_alignment != 0);
- ASSERT(num_arguments >= 0);
-
- // Make stack end at alignment and allocate space for arguments and old rsp.
- movq(kScratchRegister, rsp);
- ASSERT(IsPowerOf2(frame_alignment));
- int argument_slots_on_stack =
- ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
- and_(rsp, Immediate(-frame_alignment));
- movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- LoadAddress(rax, function);
- CallCFunction(rax, num_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(Register function, int num_arguments) {
- ASSERT(has_frame());
- // Check stack alignment.
- if (emit_debug_code()) {
- CheckStackAlignment();
- }
-
- call(function);
- ASSERT(OS::ActivationFrameAlignment() != 0);
- ASSERT(num_arguments >= 0);
- int argument_slots_on_stack =
- ArgumentStackSlotsForCFunctionCall(num_arguments);
- movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
-}
-
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
-}
-
-
-CodePatcher::CodePatcher(byte* address, int size)
- : address_(address),
- size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
- // Create a new macro assembler pointing to the address of the code to patch.
- // The size is adjusted with kGap in order for the assembler to generate size
- // bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-CodePatcher::~CodePatcher() {
- // Indicate that code has changed.
- CPU::FlushICache(address_, size_);
-
- // Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
-}
-
-
-void MacroAssembler::CheckPageFlag(
- Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
- if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
- } else {
- movq(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
- }
- if (mask < (1 << kBitsPerByte)) {
- testb(Operand(scratch, MemoryChunk::kFlagsOffset),
- Immediate(static_cast<uint8_t>(mask)));
- } else {
- testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
- }
- j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* on_black,
- Label::Distance on_black_distance) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
- GetMarkBits(object, bitmap_scratch, mask_scratch);
-
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- // The mask_scratch register contains a 1 at the position of the first bit
- // and a 0 at all other positions, including the position of the second bit.
- movq(rcx, mask_scratch);
- // Make rcx into a mask that covers both marking bits using the operation
- // rcx = mask | (mask << 1).
- lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
- // Note that we are using a 4-byte aligned 8-byte load.
- and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- cmpq(mask_scratch, rcx);
- j(equal, on_black, on_black_distance);
-}
-
-
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(
- Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance) {
- Label is_data_object;
- movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- j(equal, &is_data_object, Label::kNear);
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- Immediate(kIsIndirectStringMask | kIsNotStringMask));
- j(not_zero, not_data_object, not_data_object_distance);
- bind(&is_data_object);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
- movq(bitmap_reg, addr_reg);
- // Sign extended 32 bit immediate.
- and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- movq(rcx, addr_reg);
- int shift =
- Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
- shrl(rcx, Immediate(shift));
- and_(rcx,
- Immediate((Page::kPageAlignmentMask >> shift) &
- ~(Bitmap::kBytesPerCell - 1)));
-
- addq(bitmap_reg, rcx);
- movq(rcx, addr_reg);
- shrl(rcx, Immediate(kPointerSizeLog2));
- and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
- movl(mask_reg, Immediate(1));
- shl_cl(mask_reg);
-}
-
-
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
- GetMarkBits(value, bitmap_scratch, mask_scratch);
-
- // If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- Label done;
-
- // Since both black and grey have a 1 in the first position and white does
- // not have a 1 there we only need to check one bit.
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(not_zero, &done, Label::kNear);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- push(mask_scratch);
- // shl. May overflow, making the check conservative.
- addq(mask_scratch, mask_scratch);
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = rcx; // Holds map while checking type.
- Register length = rcx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- movq(map, FieldOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- j(not_equal, &not_heap_number, Label::kNear);
- movq(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = rcx;
- movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- testb(instance_type, Immediate(kExternalStringTag));
- j(zero, &not_external, Label::kNear);
- movq(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either ASCII or UC16.
- ASSERT(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- addq(length, Immediate(0x04));
- // Value is now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
-
- bind(&done);
-}
-
-
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
- Label next, start;
- Register empty_fixed_array_value = r8;
- LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
- movq(rcx, rax);
-
- // Check if the enum length field is properly initialized, indicating that
- // there is an enum cache.
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
-
- EnumLength(rdx, rbx);
- Cmp(rdx, Smi::FromInt(Map::kInvalidEnumCache));
- j(equal, call_runtime);
-
- jmp(&start);
-
- bind(&next);
-
- movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
-
- // For all objects but the receiver, check that the cache is empty.
- EnumLength(rdx, rbx);
- Cmp(rdx, Smi::FromInt(0));
- j(not_equal, call_runtime);
-
- bind(&start);
-
- // Check that there are no elements. Register rcx contains the current JS
- // object we've reached through the prototype chain.
- cmpq(empty_fixed_array_value,
- FieldOperand(rcx, JSObject::kElementsOffset));
- j(not_equal, call_runtime);
-
- movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- cmpq(rcx, null_value);
- j(not_equal, &next);
-}
-
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
- Register receiver_reg,
- Register scratch_reg) {
- Label no_info_available;
- ExternalReference new_space_start =
- ExternalReference::new_space_start(isolate());
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
-
- lea(scratch_reg, Operand(receiver_reg,
- JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
- movq(kScratchRegister, new_space_start);
- cmpq(scratch_reg, kScratchRegister);
- j(less, &no_info_available);
- cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
- j(greater, &no_info_available);
- CompareRoot(MemOperand(scratch_reg, -AllocationSiteInfo::kSize),
- Heap::kAllocationSiteInfoMapRootIndex);
- bind(&no_info_available);
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.h b/src/3rdparty/v8/src/x64/macro-assembler-x64.h
deleted file mode 100644
index 43b6bfb..0000000
--- a/src/3rdparty/v8/src/x64/macro-assembler-x64.h
+++ /dev/null
@@ -1,1508 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
-#define V8_X64_MACRO_ASSEMBLER_X64_H_
-
-#include "assembler.h"
-#include "frames.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-// Default scratch register used by MacroAssembler (and other code that needs
- // a spare register). The register isn't callee-saved and is not used by the
- // function calling convention.
-const Register kScratchRegister = { 10 }; // r10.
-const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
-const Register kRootRegister = { 13 }; // r13 (callee save).
-// Value of smi in kSmiConstantRegister.
-const int kSmiConstantRegisterValue = 1;
-// Actual value of root register is offset from the root array's start
- // to take advantage of negative 8-bit displacement values.
-const int kRootRegisterBias = 128;
-
-// Convenience for platform-independent signatures.
-typedef Operand MemOperand;
-
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
-// Forward declaration.
-class JumpTarget;
-
-struct SmiIndex {
- SmiIndex(Register index_register, ScaleFactor scale)
- : reg(index_register),
- scale(scale) {}
- Register reg;
- ScaleFactor scale;
-};
-
-
-// MacroAssembler implements a collection of frequently used macros.
-class MacroAssembler: public Assembler {
- public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
-  // responsibility of the caller never to invoke any such function on
-  // the macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
-
- // Prevent the use of the RootArray during the lifetime of this
- // scope object.
- class NoRootArrayScope BASE_EMBEDDED {
- public:
- explicit NoRootArrayScope(MacroAssembler* assembler)
- : variable_(&assembler->root_array_available_),
- old_value_(assembler->root_array_available_) {
- assembler->root_array_available_ = false;
- }
- ~NoRootArrayScope() {
- *variable_ = old_value_;
- }
- private:
- bool* variable_;
- bool old_value_;
- };
-
- // Operand pointing to an external reference.
- // May emit code to set up the scratch register. The operand is
- // only guaranteed to be correct as long as the scratch register
- // isn't changed.
- // If the operand is used more than once, use a scratch register
- // that is guaranteed not to be clobbered.
- Operand ExternalOperand(ExternalReference reference,
- Register scratch = kScratchRegister);
- // Loads and stores the value of an external reference.
- // Special case code for load and store to take advantage of
- // load_rax/store_rax if possible/necessary.
- // For other operations, just use:
- // Operand operand = ExternalOperand(extref);
- // operation(operand, ..);
- void Load(Register destination, ExternalReference source);
- void Store(ExternalReference destination, Register source);
- // Loads the address of the external reference into the destination
- // register.
- void LoadAddress(Register destination, ExternalReference source);
- // Returns the size of the code generated by LoadAddress.
- // Used by CallSize(ExternalReference) to find the size of a call.
- int LoadAddressSize(ExternalReference source);
- // Pushes the address of the external reference onto the stack.
- void PushAddress(ExternalReference source);
-
- // Operations on roots in the root-array.
- void LoadRoot(Register destination, Heap::RootListIndex index);
- void StoreRoot(Register source, Heap::RootListIndex index);
- // Load a root value where the index (or part of it) is variable.
- // The variable_offset register is added to the fixed_offset value
- // to get the index into the root-array.
- void LoadRootIndexed(Register destination,
- Register variable_offset,
- int fixed_offset);
- void CompareRoot(Register with, Heap::RootListIndex index);
- void CompareRoot(const Operand& with, Heap::RootListIndex index);
- void PushRoot(Heap::RootListIndex index);
-
- // These functions do not arrange the registers in any particular order so
- // they are not useful for calls that can cause a GC. The caller can
- // exclude up to 3 registers that do not need to be saved and restored.
- void PushCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- void PopCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
-
-// ---------------------------------------------------------------------------
-// GC Support
-
-
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
-
- // Record in the remembered set the fact that we have a pointer to new space
- // at the address pointed to by the addr register. Only works if addr is not
- // in new space.
- void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetFinalAction and_then);
-
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
-
- // Check if object is in new space. Jumps if the object is not in new space.
- // The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, not_equal, branch, distance);
- }
-
- // Check if object is in new space. Jumps if the object is in new space.
- // The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch,
- Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, equal, branch, distance);
- }
-
- // Check if an object has the black incremental marking color. Also uses rcx!
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_distance = Label::kFar);
-
-  // Detects conservatively whether an object is data-only, i.e. it does not
-  // need to be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
- void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // As above, but the offset has the tag presubtracted. For use with
- // Operand(reg, off).
- void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- save_fp,
- remembered_set_action,
- smi_check);
- }
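A hypothetical call site for the write-barrier helpers above, written as it would appear inside a MacroAssembler-based code generator (the register choices and the field used are illustrative only):

    // Store the heap pointer in rax into the properties slot of the object in
    // rbx, then emit the barrier. rcx is a scratch register the barrier may
    // clobber; kDontSaveFPRegs skips saving XMM registers around the stub call.
    movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rax);
    RecordWriteField(rbx, JSObject::kPropertiesOffset, rax, rcx, kDontSaveFPRegs);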
-
- // Notify the garbage collector that we wrote a pointer into a fixed array.
- // |array| is the array being stored into, |value| is the
- // object being stored. |index| is the array index represented as a non-smi.
-  // All registers are clobbered by the operation. RecordWriteArray
-  // filters out smis so that it does not update the write barrier if the
-  // value is a smi.
- void RecordWriteArray(
- Register array,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // For page containing |object| mark region covering |address|
- // dirty. |object| is the object being stored into, |value| is the
- // object being stored. The address and value registers are clobbered by the
- // operation. RecordWrite filters out smis so it does not update
- // the write barrier if the value is a smi.
- void RecordWrite(
- Register object,
- Register address,
- Register value,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
-
- void DebugBreak();
-#endif
-
- // Enter specific kind of exit frame; either in normal or
- // debug mode. Expects the number of arguments in register rax and
- // sets up the number of arguments in register rdi and the pointer
- // to the first argument in register rsi.
- //
- // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
- // accessible via StackSpaceOperand.
- void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);
-
- // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
- // memory (not GCed) on the stack accessible via StackSpaceOperand.
- void EnterApiExitFrame(int arg_stack_space);
-
- // Leave the current exit frame. Expects/provides the return value in
- // register rax:rdx (untouched) and the pointer to the first
- // argument in register rsi.
- void LeaveExitFrame(bool save_doubles = false);
-
- // Leave the current exit frame. Expects/provides the return value in
- // register rax (untouched).
- void LeaveApiExitFrame();
-
- // Push and pop the registers that can hold pointers.
- void PushSafepointRegisters() { Pushad(); }
- void PopSafepointRegisters() { Popad(); }
- // Store the value in register src in the safepoint register stack
- // slot for register dst.
- void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm);
- void StoreToSafepointRegisterSlot(Register dst, Register src);
- void LoadFromSafepointRegisterSlot(Register dst, Register src);
-
- void InitializeRootRegister() {
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate());
- movq(kRootRegister, roots_array_start);
- addq(kRootRegister, Immediate(kRootRegisterBias));
- }
-
- // ---------------------------------------------------------------------------
- // JavaScript invokes
-
- // Set up call kind marking in rcx. The method takes rcx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeCode(Handle<Code> code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- RelocInfo::Mode rmode,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- void InvokeFunction(Handle<JSFunction> function,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
-
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
-
- // ---------------------------------------------------------------------------
- // Smi tagging, untagging and operations on tagged smis.
-
- void InitializeSmiConstantRegister() {
- movq(kSmiConstantRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE64);
- }
-
- // Conversions between tagged smi values and non-tagged integer values.
-
- // Tag an integer value. The result must be known to be a valid smi value.
- // Only uses the low 32 bits of the src register. Sets the N and Z flags
- // based on the value of the resulting smi.
- void Integer32ToSmi(Register dst, Register src);
-
- // Stores an integer32 value into a memory field that already holds a smi.
- void Integer32ToSmiField(const Operand& dst, Register src);
-
- // Adds constant to src and tags the result as a smi.
- // Result must be a valid smi.
- void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
-
- // Convert smi to 32-bit integer. I.e., not sign extended into
- // high 32 bits of destination.
- void SmiToInteger32(Register dst, Register src);
- void SmiToInteger32(Register dst, const Operand& src);
-
- // Convert smi to 64-bit integer (sign extended if necessary).
- void SmiToInteger64(Register dst, Register src);
- void SmiToInteger64(Register dst, const Operand& src);
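These conversions are cheap because on this branch kSmiShift is 32 (kSmiTagSize + kSmiShiftSize, see the private constants at the end of the class), so the 32-bit payload simply lives in the upper half of the 64-bit word. A standalone sketch of the arithmetic, with illustrative helper names:

    #include <cstdint>

    // What Integer32ToSmi effectively produces: payload in bits 32..63,
    // tag bits all zero.
    inline int64_t TagAsSmi(int32_t value) {
      return static_cast<int64_t>(value) << 32;
    }

    // What SmiToInteger32 effectively produces: a shift back down that
    // discards the tag half of the word.
    inline int32_t UntagSmi(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);
    }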
-
- // Multiply a positive smi's integer value by a power of two.
- // Provides result as 64-bit integer value.
- void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
- Register src,
- int power);
-
- // Divide a positive smi's integer value by a power of two.
- // Provides result as 32-bit integer value.
- void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
- Register src,
- int power);
-
- // Perform the logical or of two smi values and return a smi value.
- // If either argument is not a smi, jump to on_not_smis and retain
- // the original values of source registers. The destination register
- // may be changed if it's not one of the source registers.
- void SmiOrIfSmis(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smis,
- Label::Distance near_jump = Label::kFar);
-
-
- // Simple comparison of smis. Both sides must be known smis to use these,
- // otherwise use Cmp.
- void SmiCompare(Register smi1, Register smi2);
- void SmiCompare(Register dst, Smi* src);
- void SmiCompare(Register dst, const Operand& src);
- void SmiCompare(const Operand& dst, Register src);
- void SmiCompare(const Operand& dst, Smi* src);
- // Compare the int32 in src register to the value of the smi stored at dst.
- void SmiCompareInteger32(const Operand& dst, Register src);
- // Sets sign and zero flags depending on value of smi in register.
- void SmiTest(Register src);
-
- // Functions performing a check on a known or potential smi. Returns
- // a condition that is satisfied if the check is successful.
-
- // Is the value a tagged smi.
- Condition CheckSmi(Register src);
- Condition CheckSmi(const Operand& src);
-
- // Is the value a non-negative tagged smi.
- Condition CheckNonNegativeSmi(Register src);
-
- // Are both values tagged smis.
- Condition CheckBothSmi(Register first, Register second);
-
- // Are both values non-negative tagged smis.
- Condition CheckBothNonNegativeSmi(Register first, Register second);
-
-  // Is either value a tagged smi.
- Condition CheckEitherSmi(Register first,
- Register second,
- Register scratch = kScratchRegister);
-
- // Is the value the minimum smi value (since we are using
- // two's complement numbers, negating the value is known to yield
- // a non-smi value).
- Condition CheckIsMinSmi(Register src);
-
-  // Checks whether a 32-bit integer value is valid for conversion
-  // to a smi.
- Condition CheckInteger32ValidSmiValue(Register src);
-
-  // Checks whether a 32-bit unsigned integer value is valid for
-  // conversion to a smi.
- Condition CheckUInteger32ValidSmiValue(Register src);
-
- // Check whether src is a Smi, and set dst to zero if it is a smi,
- // and to one if it isn't.
- void CheckSmiToIndicator(Register dst, Register src);
- void CheckSmiToIndicator(Register dst, const Operand& src);
-
- // Test-and-jump functions. Typically combines a check function
- // above with a conditional jump.
-
- // Jump if the value cannot be represented by a smi.
- void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
- Label::Distance near_jump = Label::kFar);
-
- // Jump if the unsigned integer value cannot be represented by a smi.
- void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
- Label::Distance near_jump = Label::kFar);
-
- // Jump to label if the value is a tagged smi.
- void JumpIfSmi(Register src,
- Label* on_smi,
- Label::Distance near_jump = Label::kFar);
-
- // Jump to label if the value is not a tagged smi.
- void JumpIfNotSmi(Register src,
- Label* on_not_smi,
- Label::Distance near_jump = Label::kFar);
-
- // Jump to label if the value is not a non-negative tagged smi.
- void JumpUnlessNonNegativeSmi(Register src,
- Label* on_not_smi,
- Label::Distance near_jump = Label::kFar);
-
- // Jump to label if the value, which must be a tagged smi, has value equal
- // to the constant.
- void JumpIfSmiEqualsConstant(Register src,
- Smi* constant,
- Label* on_equals,
- Label::Distance near_jump = Label::kFar);
-
- // Jump if either or both register are not smi values.
- void JumpIfNotBothSmi(Register src1,
- Register src2,
- Label* on_not_both_smi,
- Label::Distance near_jump = Label::kFar);
-
- // Jump if either or both register are not non-negative smi values.
- void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
- Label* on_not_both_smi,
- Label::Distance near_jump = Label::kFar);
-
- // Operations on tagged smi values.
-
- // Smis represent a subset of integers. The subset is always equivalent to
- // a two's complement interpretation of a fixed number of bits.
-
- // Optimistically adds an integer constant to a supposed smi.
- // If the src is not a smi, or the result is not a smi, jump to
- // the label.
- void SmiTryAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Add an integer constant to a tagged smi, giving a tagged smi as result.
- // No overflow testing on the result is done.
- void SmiAddConstant(Register dst, Register src, Smi* constant);
-
- // Add an integer constant to a tagged smi, giving a tagged smi as result.
- // No overflow testing on the result is done.
- void SmiAddConstant(const Operand& dst, Smi* constant);
-
- // Add an integer constant to a tagged smi, giving a tagged smi as result,
- // or jumping to a label if the result cannot be represented by a smi.
- void SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Subtract an integer constant from a tagged smi, giving a tagged smi as
- // result. No testing on the result is done. Sets the N and Z flags
- // based on the value of the resulting integer.
- void SmiSubConstant(Register dst, Register src, Smi* constant);
-
- // Subtract an integer constant from a tagged smi, giving a tagged smi as
- // result, or jumping to a label if the result cannot be represented by a smi.
- void SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Negating a smi can give a negative zero or too large positive value.
- // NOTICE: This operation jumps on success, not failure!
- void SmiNeg(Register dst,
- Register src,
- Label* on_smi_result,
- Label::Distance near_jump = Label::kFar);
-
-  // Adds smi values and returns the result as a smi.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
- void SmiAdd(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
- void SmiAdd(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- void SmiAdd(Register dst,
- Register src1,
- Register src2);
-
-  // Subtracts smi values and returns the result as a smi.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
- void SmiSub(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- void SmiSub(Register dst,
- Register src1,
- Register src2);
-
- void SmiSub(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- void SmiSub(Register dst,
- Register src1,
- const Operand& src2);
-
-  // Multiplies smi values and returns the result as a smi,
- // if possible.
- // If dst is src1, then src1 will be destroyed, even if
- // the operation is unsuccessful.
- void SmiMul(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Divides one smi by another and returns the quotient.
- // Clobbers rax and rdx registers.
- void SmiDiv(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Divides one smi by another and returns the remainder.
- // Clobbers rax and rdx registers.
- void SmiMod(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
-
- // Bitwise operations.
- void SmiNot(Register dst, Register src);
- void SmiAnd(Register dst, Register src1, Register src2);
- void SmiOr(Register dst, Register src1, Register src2);
- void SmiXor(Register dst, Register src1, Register src2);
- void SmiAndConstant(Register dst, Register src1, Smi* constant);
- void SmiOrConstant(Register dst, Register src1, Smi* constant);
- void SmiXorConstant(Register dst, Register src1, Smi* constant);
-
- void SmiShiftLeftConstant(Register dst,
- Register src,
- int shift_value);
- void SmiShiftLogicalRightConstant(Register dst,
- Register src,
- int shift_value,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
- void SmiShiftArithmeticRightConstant(Register dst,
- Register src,
- int shift_value);
-
- // Shifts a smi value to the left, and returns the result if that is a smi.
- // Uses and clobbers rcx, so dst may not be rcx.
- void SmiShiftLeft(Register dst,
- Register src1,
- Register src2);
- // Shifts a smi value to the right, shifting in zero bits at the top, and
-  // returns the unsigned interpretation of the result if that is a smi.
- // Uses and clobbers rcx, so dst may not be rcx.
- void SmiShiftLogicalRight(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
- // Shifts a smi value to the right, sign extending the top, and
-  // returns the signed interpretation of the result. That will always
- // be a valid smi value, since it's numerically smaller than the
- // original.
- // Uses and clobbers rcx, so dst may not be rcx.
- void SmiShiftArithmeticRight(Register dst,
- Register src1,
- Register src2);
-
- // Specialized operations
-
- // Select the non-smi register of two registers where exactly one is a
-  // smi. If neither is a smi, jump to the failure label.
- void SelectNonSmi(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smis,
- Label::Distance near_jump = Label::kFar);
-
- // Converts, if necessary, a smi to a combination of number and
- // multiplier to be used as a scaled index.
- // The src register contains a *positive* smi value. The shift is the
- // power of two to multiply the index value by (e.g.
- // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
- // The returned index register may be either src or dst, depending
- // on what is most efficient. If src and dst are different registers,
- // src is always unchanged.
- SmiIndex SmiToIndex(Register dst, Register src, int shift);
-
- // Converts a positive smi to a negative index.
- SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
-
- // Add the value of a smi in memory to an int32 register.
- // Sets flags as a normal add.
- void AddSmiField(Register dst, const Operand& src);
-
- // Basic Smi operations.
- void Move(Register dst, Smi* source) {
- LoadSmiConstant(dst, source);
- }
-
- void Move(const Operand& dst, Smi* source) {
- Register constant = GetSmiConstant(source);
- movq(dst, constant);
- }
-
- void Push(Smi* smi);
- void Test(const Operand& dst, Smi* source);
-
-
- // ---------------------------------------------------------------------------
- // String macros.
-
- // If object is a string, its map is loaded into object_map.
- void JumpIfNotString(Register object,
- Register object_map,
- Label* not_string,
- Label::Distance near_jump = Label::kFar);
-
-
- void JumpIfNotBothSequentialAsciiStrings(
- Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- Label* on_not_both_flat_ascii,
- Label::Distance near_jump = Label::kFar);
-
- // Check whether the instance type represents a flat ASCII string. Jump to the
-  // label if not. If the instance type can be scratched, specify the same
-  // register for both instance type and scratch.
- void JumpIfInstanceTypeIsNotSequentialAscii(
- Register instance_type,
- Register scratch,
-      Label* on_not_flat_ascii_string,
- Label::Distance near_jump = Label::kFar);
-
- void JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first_object_instance_type,
- Register second_object_instance_type,
- Register scratch1,
- Register scratch2,
- Label* on_fail,
- Label::Distance near_jump = Label::kFar);
-
- // ---------------------------------------------------------------------------
- // Macro instructions.
-
- // Load a register with a long value as efficiently as possible.
- void Set(Register dst, int64_t x);
- void Set(const Operand& dst, int64_t x);
-
- // Move if the registers are not identical.
- void Move(Register target, Register source);
-
- // Support for constant splitting.
- bool IsUnsafeInt(const int x);
- void SafeMove(Register dst, Smi* src);
- void SafePush(Smi* src);
-
- // Bit-field support.
- void TestBit(const Operand& dst, int bit_index);
-
- // Handle support
- void Move(Register dst, Handle<Object> source);
- void Move(const Operand& dst, Handle<Object> source);
- void Cmp(Register dst, Handle<Object> source);
- void Cmp(const Operand& dst, Handle<Object> source);
- void Cmp(Register dst, Smi* src);
- void Cmp(const Operand& dst, Smi* src);
- void Push(Handle<Object> source);
-
- // Load a heap object and handle the case of new-space objects by
- // indirecting via a global cell.
- void LoadHeapObject(Register result, Handle<HeapObject> object);
- void PushHeapObject(Handle<HeapObject> object);
-
- void LoadObject(Register result, Handle<Object> object) {
- if (object->IsHeapObject()) {
- LoadHeapObject(result, Handle<HeapObject>::cast(object));
- } else {
- Move(result, object);
- }
- }
-
- // Load a global cell into a register.
- void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the rsp register.
- void Drop(int stack_elements);
-
- void Call(Label* target) { call(target); }
-
- // Control Flow
- void Jump(Address destination, RelocInfo::Mode rmode);
- void Jump(ExternalReference ext);
- void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
-
- void Call(Address destination, RelocInfo::Mode rmode);
- void Call(ExternalReference ext);
- void Call(Handle<Code> code_object,
- RelocInfo::Mode rmode,
- TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // The size of the code generated for different call instructions.
- int CallSize(Address destination, RelocInfo::Mode rmode) {
- return kCallInstructionLength;
- }
- int CallSize(ExternalReference ext);
- int CallSize(Handle<Code> code_object) {
- // Code calls use 32-bit relative addressing.
- return kShortCallInstructionLength;
- }
- int CallSize(Register target) {
- // Opcode: REX_opt FF /2 m64
- return (target.high_bit() != 0) ? 3 : 2;
- }
- int CallSize(const Operand& target) {
- // Opcode: REX_opt FF /2 m64
- return (target.requires_rex() ? 2 : 1) + target.operand_size();
- }
-
- // Emit call to the code we are currently generating.
- void CallSelf() {
- Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
- Call(self, RelocInfo::CODE_TARGET);
- }
-
- // Non-x64 instructions.
- // Push/pop all general purpose registers.
- // Does not push rsp/rbp nor any of the assembler's special purpose registers
- // (kScratchRegister, kSmiConstantRegister, kRootRegister).
- void Pushad();
- void Popad();
- // Sets the stack as after performing Popad, without actually loading the
- // registers.
- void Dropad();
-
- // Compare object type for heap object.
- // Always use unsigned comparisons: above and below, not less and greater.
- // Incoming register is heap_object and outgoing register is map.
- // They may be the same register, and may be kScratchRegister.
- void CmpObjectType(Register heap_object, InstanceType type, Register map);
-
- // Compare instance type for map.
- // Always use unsigned comparisons: above and below, not less and greater.
- void CmpInstanceType(Register map, InstanceType type);
-
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by index in
- // the FastDoubleElements array elements, otherwise jump to fail. Note that
- // index must not be smi-tagged.
- void StoreNumberToDoubleElements(Register maybe_number,
- Register elements,
- Register index,
- XMMRegister xmm_scratch,
- Label* fail,
- int elements_offset = 0);
-
- // Compare an object's map with the specified map and its transitioned
- // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
-  // result of map compare. If multiple map compares are required, the compare
-  // sequence branches to early_success.
- void CompareMap(Register obj,
- Handle<Map> map,
- Label* early_success,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // Check if the map of an object is equal to a specified map and branch to
- // label if not. Skip the smi check if not required (object is known to be a
- // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
- SmiCheckType smi_check_type,
- CompareMapMode mode = REQUIRE_EXACT_MAP);
-
- // Check if the map of an object is equal to a specified map and branch to a
- // specified target if equal. Skip the smi check if not required (object is
- // known to be a heap object)
- void DispatchMap(Register obj,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type);
-
- // Check if the object in register heap_object is a string. Afterwards the
- // register map contains the object map and the register instance_type
- // contains the instance_type. The registers map and instance_type can be the
- // same in which case it contains the instance type afterwards. Either of the
- // registers map and instance_type can be the same as heap_object.
- Condition IsObjectStringType(Register heap_object,
- Register map,
- Register instance_type);
-
- // FCmp compares and pops the two values on top of the FPU stack.
-  // The flag results are similar to integer cmp, but require unsigned
-  // jcc instructions (je, ja, jae, jb, jbe, and jz).
- void FCmp();
-
- void ClampUint8(Register reg);
-
- void ClampDoubleToUint8(XMMRegister input_reg,
- XMMRegister temp_xmm_reg,
- Register result_reg);
-
- void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
- void EnumLength(Register dst, Register map);
- void NumberOfOwnDescriptors(Register dst, Register map);
-
-  // Extract the bit field Field from a smi-tagged value in reg, leaving the
-  // decoded field value in reg, still smi-tagged.
-  template<typename Field>
- void DecodeField(Register reg) {
- static const int shift = Field::kShift + kSmiShift;
- static const int mask = Field::kMask >> Field::kShift;
- shr(reg, Immediate(shift));
- and_(reg, Immediate(mask));
- shl(reg, Immediate(kSmiShift));
- }
-
- // Abort execution if argument is not a number, enabled via --debug-code.
- void AssertNumber(Register object);
-
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
-
- // Abort execution if argument is not a smi, enabled via --debug-code.
- void AssertSmi(Register object);
- void AssertSmi(const Operand& object);
-
- // Abort execution if a 64 bit register containing a 32 bit payload does not
- // have zeros in the top 32 bits, enabled via --debug-code.
- void AssertZeroExtended(Register reg);
-
- // Abort execution if argument is not a string, enabled via --debug-code.
- void AssertString(Register object);
-
- // Abort execution if argument is not the root value with the given index,
- // enabled via --debug-code.
- void AssertRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
-
- // ---------------------------------------------------------------------------
- // Exception handling
-
- // Push a new try handler and link it into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
-
- // Unlink the stack handler on top of the stack from the try handler chain.
- void PopTryHandler();
-
-  // Activate the top handler in the try handler chain and pass the
- // thrown value.
- void Throw(Register value);
-
- // Propagate an uncatchable exception out of the current JS stack.
- void ThrowUncatchable(Register value);
-
- // ---------------------------------------------------------------------------
- // Inline caching support
-
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, but the scratch register and kScratchRegister,
- // which must be different, are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
- void GetNumberHash(Register r0, Register scratch);
-
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result);
-
-
- // ---------------------------------------------------------------------------
- // Allocation support
-
- // Allocate an object in new space. If the new space is exhausted control
- // continues at the gc_required label. The allocated object is returned in
- // result and end of the new object is returned in result_end. The register
- // scratch can be passed as no_reg in which case an additional object
- // reference will be added to the reloc info. The returned pointers in result
- // and result_end have not yet been tagged as heap objects. If
- // result_contains_top_on_entry is true the content of result is known to be
- // the allocation top on entry (could be result_end from a previous call to
- // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
- // should be no_reg as it is never used.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void AllocateInNewSpace(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. Make sure that no pointers are left to the
- // object(s) no longer allocated as they would be invalid when allocation is
- // un-done.
- void UndoAllocationInNewSpace(Register object);
-
- // Allocate a heap number in new space with undefined value. Returns
- // tagged pointer in result register, or jumps to gc_required if new
- // space is full.
- void AllocateHeapNumber(Register result,
- Register scratch,
- Label* gc_required);
-
- // Allocate a sequential string. All the header fields of the string object
- // are initialized.
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
-
- // Allocate a raw cons string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // Allocate a raw sliced string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateAsciiSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
-
- // ---------------------------------------------------------------------------
- // Support functions.
-
- // Check if result is zero and op is negative.
- void NegativeZeroTest(Register result, Register op, Label* then_label);
-
- // Check if result is zero and op is negative in code using jump targets.
- void NegativeZeroTest(CodeGenerator* cgen,
- Register result,
- Register op,
- JumpTarget* then_target);
-
- // Check if result is zero and any of op1 and op2 are negative.
- // Register scratch is destroyed, and it must be different from op2.
- void NegativeZeroTest(Register result, Register op1, Register op2,
- Register scratch, Label* then_label);
-
-  // Try to get the function prototype of a function and put the value in
-  // the result register. Checks that the function really is a
- // function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other register may be
- // clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Label* miss,
- bool miss_on_bound_function = false);
-
- // Generates code for reporting that an illegal operation has
- // occurred.
- void IllegalOperation(int num_arguments);
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
- // Find the function context up the context chain.
- void LoadContext(Register dst, int context_chain_length);
-
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
- // Load the global function with the given index.
- void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
-
- // Load the initial map from the global function. The registers
- // function and map can be the same.
- void LoadGlobalFunctionInitialMap(Register function, Register map);
-
- // ---------------------------------------------------------------------------
- // Runtime calls
-
- // Call a code stub.
- void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
-
- // Tail call a code stub (jump).
- void TailCallStub(CodeStub* stub);
-
- // Return from a code stub after popping its arguments.
- void StubReturn(int argc);
-
- // Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
-
- // Call a runtime function and save the value of XMM registers.
- void CallRuntimeSaveDoubles(Runtime::FunctionId id);
-
- // Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments);
-
- // Convenience function: call an external reference.
- void CallExternalReference(const ExternalReference& ext,
- int num_arguments);
-
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext, int result_size);
-
- // Prepares stack to put arguments (aligns and so on). WIN64 calling
-  // convention requires putting the pointer to the return value slot into
-  // rcx (rcx must be preserved until CallApiFunctionAndReturn). Saves
- // context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
- // inside the exit frame (not GCed) accessible via StackSpaceOperand.
- void PrepareCallApiFunction(int arg_stack_space);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Clobbers r14, r15, rbx and
- // caller-save registers. Restores context. On return removes
- // stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Address function_address, int stack_space);
-
- // Before calling a C-function from generated code, align arguments on stack.
-  // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
- // etc., not pushed. The argument count assumes all arguments are word sized.
- // The number of slots reserved for arguments depends on platform. On Windows
- // stack slots are reserved for the arguments passed in registers. On other
- // platforms stack slots are only reserved for the arguments actually passed
- // on the stack.
- void PrepareCallCFunction(int num_arguments);
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
-
- // Calculate the number of stack slots to reserve for arguments when calling a
- // C function.
- int ArgumentStackSlotsForCFunctionCall(int num_arguments);
-
- // ---------------------------------------------------------------------------
- // Utilities
-
- void Ret();
-
- // Return and drop arguments from stack, where the number of arguments
- // may be bigger than 2^16 - 1. Requires a scratch register.
- void Ret(int bytes_dropped, Register scratch);
-
- Handle<Object> CodeObject() {
- ASSERT(!code_object_.is_null());
- return code_object_;
- }
-
- // Copy length bytes from source to destination.
- // Uses scratch register internally (if you have a low-eight register
- // free, do use it, otherwise kScratchRegister will be used).
- // The min_length is a minimum limit on the value that length will have.
- // The algorithm has some special cases that might be omitted if the string
- // is known to always be long.
- void CopyBytes(Register destination,
- Register source,
- Register length,
- int min_length = 0,
- Register scratch = kScratchRegister);
-
-  // Initialize fields with filler values. Fields starting at |start_offset|
-  // up to but not including |end_offset| are overwritten with the value in
-  // |filler|. At the end of the loop, |start_offset| takes the value of
-  // |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
-
-
- // ---------------------------------------------------------------------------
- // StatsCounter support
-
- void SetCounter(StatsCounter* counter, int value);
- void IncrementCounter(StatsCounter* counter, int value);
- void DecrementCounter(StatsCounter* counter, int value);
-
-
- // ---------------------------------------------------------------------------
- // Debugging
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, const char* msg);
-
- void AssertFastElements(Register elements);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg);
-
- // Print a message to stdout and abort execution.
- void Abort(const char* msg);
-
- // Check that the stack is aligned.
- void CheckStackAlignment();
-
- // Verify restrictions about code generated in stubs.
- void set_generating_stub(bool value) { generating_stub_ = value; }
- bool generating_stub() { return generating_stub_; }
- void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
- bool allow_stub_calls() { return allow_stub_calls_; }
- void set_has_frame(bool value) { has_frame_ = value; }
- bool has_frame() { return has_frame_; }
- inline bool AllowThisStubCall(CodeStub* stub);
-
- static int SafepointRegisterStackIndex(Register reg) {
- return SafepointRegisterStackIndex(reg.code());
- }
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void LeaveFrame(StackFrame::Type type);
-
- // Expects object in rax and returns map with validated enum cache
- // in rax. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value,
- Label* call_runtime);
-
- // AllocationSiteInfo support. Arrays may have an associated
- // AllocationSiteInfo object that can be checked for in order to pretransition
- // to another type.
- // On entry, receiver_reg should point to the array object.
- // scratch_reg gets clobbered.
-  // If allocation info is present, the condition flags are set to equal.
- void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
- Register scratch_reg);
-
- private:
- // Order general registers are pushed by Pushad.
- // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
- static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
- static const int kNumSafepointSavedRegisters = 11;
- static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
- bool generating_stub_;
- bool allow_stub_calls_;
- bool has_frame_;
- bool root_array_available_;
-
- // Returns a register holding the smi value. The register MUST NOT be
- // modified. It may be the "smi 1 constant" register.
- Register GetSmiConstant(Smi* value);
-
- intptr_t RootRegisterDelta(ExternalReference other);
-
- // Moves the smi value to the destination register.
- void LoadSmiConstant(Register dst, Smi* value);
-
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
-
- // Helper functions for generating invokes.
- void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
- Label::Distance near_jump = Label::kFar,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
-
- void EnterExitFramePrologue(bool save_rax);
-
- // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
- // accessible via StackSpaceOperand.
- void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
-
- void LeaveExitFrameEpilogue();
-
- // Allocation support helpers.
- // Loads the top of new-space into the result register.
- // Otherwise the address of the new-space top is loaded into scratch (if
- // scratch is valid), and the new-space top is loaded into result.
- void LoadAllocationTopHelper(Register result,
- Register scratch,
- AllocationFlags flags);
- // Update allocation top with value in result_end register.
- // If scratch is valid, it contains the address of the allocation top.
- void UpdateAllocationTopHelper(Register result_end, Register scratch);
-
- // Helper for PopHandleScope. Allowed to perform a GC and returns
- // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
- // possibly returns a failure object indicating an allocation failure.
- Object* PopHandleScopeHelper(Register saved,
- Register scratch,
- bool gc_allowed);
-
- // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch,
- Label::Distance distance = Label::kFar);
-
- // Helper for finding the mark bits for an address. Afterwards, the
-  // bitmap register points at the word with the mark bits, and the mask
-  // register holds the position of the first bit. Uses rcx as scratch and
-  // leaves addr_reg unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
- Register mask_reg);
-
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
- // Compute memory operands for safepoint stack slots.
- Operand SafepointRegisterSlot(Register reg);
- static int SafepointRegisterStackIndex(int reg_code) {
- return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
- }
-
- // Needs access to SafepointRegisterStackIndex for compiled frame
- // traversal.
- friend class StandardFrame;
-};
-
-
-// The code patcher is used to patch (typically) small parts of code e.g. for
-// debugging and other types of instrumentation. When using the code patcher
-// the exact number of bytes specified must be emitted. It is not legal to
-// emit relocation information. If any of these constraints are violated, it
-// causes an assertion to fail.
-class CodePatcher {
- public:
- CodePatcher(byte* address, int size);
- virtual ~CodePatcher();
-
- // Macro assembler to emit code.
- MacroAssembler* masm() { return &masm_; }
-
- private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
- MacroAssembler masm_; // Macro assembler used to generate the code.
-};
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-// Generate an Operand for loading a field from an object.
-inline Operand FieldOperand(Register object, int offset) {
- return Operand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
- int offset) {
- return Operand(object, index, scale, offset - kHeapObjectTag);
-}
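Hypothetical uses of the two FieldOperand overloads, as they would appear in a generated-code emitter (the registers chosen here are illustrative):

    // Load the map of the heap object in rax; FieldOperand subtracts
    // kHeapObjectTag so the raw machine address is used.
    movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));

    // Load element rcx (an untagged index) from the FixedArray in rdx.
    movq(rax, FieldOperand(rdx, rcx, times_pointer_size,
                           FixedArray::kHeaderSize));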
-
-
-inline Operand ContextOperand(Register context, int index) {
- return Operand(context, Context::SlotOffset(index));
-}
-
-
-inline Operand GlobalObjectOperand() {
- return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
-}
-
-
-static inline Operand QmlGlobalObjectOperand() {
- return ContextOperand(rsi, Context::QML_GLOBAL_OBJECT_INDEX);
-}
-
-
-// Provides access to exit frame stack space (not GCed).
-inline Operand StackSpaceOperand(int index) {
-#ifdef _WIN64
- const int kShaddowSpace = 4;
- return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
-#else
- return Operand(rsp, index * kPointerSize);
-#endif
-}
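On Windows the first four slots above rsp are the register "home" (shadow) area mandated by the x64 ABI, which is why index 0 maps to rsp + 32 there and to rsp + 0 on other platforms. For example (illustrative):

    movq(StackSpaceOperand(0), rax);  // first usable exit-frame scratch slot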
-
-
-
-#ifdef GENERATED_CODE_COVERAGE
-extern void LogGeneratedCodeCoverage(const char* file_line);
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) { \
- Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
- masm->pushfq(); \
- masm->Pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \
- masm->pop(rax); \
- masm->Popad(); \
- masm->popfq(); \
- } \
- masm->
-#else
-#define ACCESS_MASM(masm) masm->
-#endif
-
-} } // namespace v8::internal
-
-#endif // V8_X64_MACRO_ASSEMBLER_X64_H_
diff --git a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc b/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc
deleted file mode 100644
index f5b5e95..0000000
--- a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc
+++ /dev/null
@@ -1,1553 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "serialize.h"
-#include "unicode.h"
-#include "log.h"
-#include "regexp-stack.h"
-#include "macro-assembler.h"
-#include "regexp-macro-assembler.h"
-#include "x64/regexp-macro-assembler-x64.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-
-/*
- * This assembler uses the following register assignment convention
- * - rdx : Currently loaded character(s) as ASCII or UC16. Must be loaded
- * using LoadCurrentCharacter before using any of the dispatch methods.
- * Temporarily stores the index of capture start after a matching pass
- * for a global regexp.
- * - rdi : Current position in input, as negative offset from end of string.
- * Please notice that this is the byte offset, not the character
- * offset! Is always a 32-bit signed (negative) offset, but must be
- * maintained sign-extended to 64 bits, since it is used as index.
- * - rsi : End of input (points to byte after last character in input),
- * so that rsi+rdi points to the current character.
- * - rbp : Frame pointer. Used to access arguments, local variables and
- * RegExp registers.
- * - rsp : Points to tip of C stack.
- * - rcx : Points to tip of backtrack stack. The backtrack stack contains
- * only 32-bit values. Most are offsets from some base (e.g., character
- * positions from end of string or code location from Code* pointer).
- * - r8 : Code object pointer. Used to convert between absolute and
- * code-object-relative addresses.
- *
- * The registers rax, rbx, r9 and r11 are free to use for computations.
- * If changed to use r12+, they should be saved as callee-save registers.
- * The macro assembler special registers r12 and r13 (kSmiConstantRegister,
- * kRootRegister) aren't special during execution of RegExp code (they don't
- * hold the values assumed when creating JS code), so no Smi or Root related
- * macro operations can be used.
- *
- * Each call to a C++ method should retain these registers.
- *
- * The stack will have the following content, in some order, indexable from the
- * frame pointer (see, e.g., kStackHighEnd):
- * - Isolate* isolate (address of the current isolate)
- * - direct_call (if 1, direct call from JavaScript code, if 0 call
- * through the runtime system)
- * - stack_area_base (high end of the memory area to use as
- * backtracking stack)
- * - capture array size (may fit multiple sets of matches)
- * - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (address of end of string)
- * - start of input (address of first character in string)
- * - start index (character index of start)
- * - String* input_string (input string)
- * - return address
- * - backup of callee save registers (rbx, possibly rsi and rdi).
- * - success counter (only useful for global regexp to count matches)
- * - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
- * - At start of string (if 1, we are starting at the start of the
- * string, otherwise 0)
- * - register 0 rbp[-n] (Only positions must be stored in the first
- * - register 1 rbp[-n-8] num_saved_registers_ registers)
- * - ...
- *
- * The first num_saved_registers_ registers are initialized to point to
- * "character -1" in the string (i.e., char_size() bytes before the first
- * character of the string). The remaining registers start out uninitialized.
- *
- * The first seven values must be provided by the calling code by
- * calling the code's entry address cast to a function pointer with the
- * following signature:
- * int (*match)(String* input_string,
- * int start_index,
- * Address start,
- * Address end,
- * int* capture_output_array,
- * bool at_start,
- * byte* stack_area_base,
- * bool direct_call)
- */
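-
-// A concrete illustration of the rsi/rdi convention described above (not part
-// of the generated code, just a worked example): for the one-byte input "abc"
-// with the current position at 'b',
-//   rsi = address one past 'c'   (end of input)
-//   rdi = -2                     (byte offset of 'b' from the end)
-// so the current character lives at rsi + rdi, and a load with cp_offset 1
-// (see LoadCurrentCharacterUnchecked below) reads the byte at rsi + rdi + 1,
-// i.e. 'c'.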
-
-#define __ ACCESS_MASM((&masm_))
-
-RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
- Mode mode,
- int registers_to_save,
- Zone* zone)
- : NativeRegExpMacroAssembler(zone),
- masm_(Isolate::Current(), NULL, kRegExpCodeSize),
- no_root_array_scope_(&masm_),
- code_relative_fixup_positions_(4, zone),
- mode_(mode),
- num_registers_(registers_to_save),
- num_saved_registers_(registers_to_save),
- entry_label_(),
- start_label_(),
- success_label_(),
- backtrack_label_(),
- exit_label_() {
- ASSERT_EQ(0, registers_to_save % 2);
- __ jmp(&entry_label_); // We'll write the entry code when we know more.
- __ bind(&start_label_); // And then continue from here.
-}
-
-
-RegExpMacroAssemblerX64::~RegExpMacroAssemblerX64() {
- // Unuse labels in case we throw away the assembler without calling GetCode.
- entry_label_.Unuse();
- start_label_.Unuse();
- success_label_.Unuse();
- backtrack_label_.Unuse();
- exit_label_.Unuse();
- check_preempt_label_.Unuse();
- stack_overflow_label_.Unuse();
-}
-
-
-int RegExpMacroAssemblerX64::stack_limit_slack() {
- return RegExpStack::kStackLimitSlack;
-}
-
-
-void RegExpMacroAssemblerX64::AdvanceCurrentPosition(int by) {
- if (by != 0) {
- __ addq(rdi, Immediate(by * char_size()));
- }
-}
-
-
-void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
- ASSERT(reg >= 0);
- ASSERT(reg < num_registers_);
- if (by != 0) {
- __ addq(register_location(reg), Immediate(by));
- }
-}
-
-
-void RegExpMacroAssemblerX64::Backtrack() {
- CheckPreemption();
- // Pop Code* offset from backtrack stack, add Code* and jump to location.
- Pop(rbx);
- __ addq(rbx, code_object_pointer());
- __ jmp(rbx);
-}
-
-
-void RegExpMacroAssemblerX64::Bind(Label* label) {
- __ bind(label);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacter(uint32_t c, Label* on_equal) {
- __ cmpl(current_character(), Immediate(c));
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
- __ cmpl(current_character(), Immediate(limit));
- BranchOrBacktrack(greater, on_greater);
-}
-
-
-void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, &not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
- BranchOrBacktrack(equal, on_at_start);
- __ bind(&not_at_start);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
- BranchOrBacktrack(not_equal, on_not_at_start);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterLT(uc16 limit, Label* on_less) {
- __ cmpl(current_character(), Immediate(limit));
- BranchOrBacktrack(less, on_less);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string) {
-#ifdef DEBUG
- // If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ASCII character.
- if (mode_ == ASCII) {
- ASSERT(String::IsOneByte(str.start(), str.length()));
- }
-#endif
- int byte_length = str.length() * char_size();
- int byte_offset = cp_offset * char_size();
- if (check_end_of_string) {
- // Check that there are at least str.length() characters left in the input.
- __ cmpl(rdi, Immediate(-(byte_offset + byte_length)));
- BranchOrBacktrack(greater, on_failure);
- }
-
- if (on_failure == NULL) {
- // Instead of inlining a backtrack, (re)use the global backtrack target.
- on_failure = &backtrack_label_;
- }
-
- // Do one character test first to minimize loading for the case that
- // we don't match at all (loading more than one character introduces the
- // chance of reading unaligned and reading across cache boundaries).
- // If the first character matches, expect a larger chance of matching the
- // string, and start loading more characters at a time.
- if (mode_ == ASCII) {
- __ cmpb(Operand(rsi, rdi, times_1, byte_offset),
- Immediate(static_cast<int8_t>(str[0])));
- } else {
- // Don't use 16-bit immediate. The size changing prefix throws off
- // pre-decoding.
- __ movzxwl(rax,
- Operand(rsi, rdi, times_1, byte_offset));
- __ cmpl(rax, Immediate(static_cast<int32_t>(str[0])));
- }
- BranchOrBacktrack(not_equal, on_failure);
-
- __ lea(rbx, Operand(rsi, rdi, times_1, 0));
- for (int i = 1, n = str.length(); i < n; ) {
- if (mode_ == ASCII) {
- if (i + 8 <= n) {
- uint64_t combined_chars =
- (static_cast<uint64_t>(str[i + 0]) << 0) |
- (static_cast<uint64_t>(str[i + 1]) << 8) |
- (static_cast<uint64_t>(str[i + 2]) << 16) |
- (static_cast<uint64_t>(str[i + 3]) << 24) |
- (static_cast<uint64_t>(str[i + 4]) << 32) |
- (static_cast<uint64_t>(str[i + 5]) << 40) |
- (static_cast<uint64_t>(str[i + 6]) << 48) |
- (static_cast<uint64_t>(str[i + 7]) << 56);
- __ movq(rax, combined_chars, RelocInfo::NONE64);
- __ cmpq(rax, Operand(rbx, byte_offset + i));
- i += 8;
- } else if (i + 4 <= n) {
- uint32_t combined_chars =
- (static_cast<uint32_t>(str[i + 0]) << 0) |
- (static_cast<uint32_t>(str[i + 1]) << 8) |
- (static_cast<uint32_t>(str[i + 2]) << 16) |
- (static_cast<uint32_t>(str[i + 3]) << 24);
- __ cmpl(Operand(rbx, byte_offset + i), Immediate(combined_chars));
- i += 4;
- } else {
- __ cmpb(Operand(rbx, byte_offset + i),
- Immediate(static_cast<int8_t>(str[i])));
- i++;
- }
- } else {
- ASSERT(mode_ == UC16);
- if (i + 4 <= n) {
- uint64_t combined_chars = *reinterpret_cast<const uint64_t*>(&str[i]);
- __ movq(rax, combined_chars, RelocInfo::NONE64);
- __ cmpq(rax,
- Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
- i += 4;
- } else if (i + 2 <= n) {
- uint32_t combined_chars = *reinterpret_cast<const uint32_t*>(&str[i]);
- __ cmpl(Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)),
- Immediate(combined_chars));
- i += 2;
- } else {
- __ movzxwl(rax,
- Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
- __ cmpl(rax, Immediate(str[i]));
- i++;
- }
- }
- BranchOrBacktrack(not_equal, on_failure);
- }
-}
-
-
-void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
- Label fallthrough;
- __ cmpl(rdi, Operand(backtrack_stackpointer(), 0));
- __ j(not_equal, &fallthrough);
- Drop();
- BranchOrBacktrack(no_condition, on_equal);
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
- __ movq(rdx, register_location(start_reg)); // Offset of start of capture
- __ movq(rbx, register_location(start_reg + 1)); // Offset of end of capture
- __ subq(rbx, rdx); // Length of capture.
-
- // -----------------------
- // rdx = Start offset of capture.
- // rbx = Length of capture
-
- // If length is negative, this code will fail (it's a symptom of a partial or
- // illegal capture where the start of capture is after the end of capture).
- // This must not happen (no back-reference can reference a capture that wasn't
- // closed before in the reg-exp, and we must not generate code that can cause
- // this condition).
-
- // If length is zero, either the capture is empty or it is nonparticipating.
- // In either case succeed immediately.
- __ j(equal, &fallthrough);
-
- // -----------------------
- // rdx - Start of capture
- // rbx - length of capture
- // Check that there are sufficient characters left in the input.
- __ movl(rax, rdi);
- __ addl(rax, rbx);
- BranchOrBacktrack(greater, on_no_match);
-
- if (mode_ == ASCII) {
- Label loop_increment;
- if (on_no_match == NULL) {
- on_no_match = &backtrack_label_;
- }
-
- __ lea(r9, Operand(rsi, rdx, times_1, 0));
- __ lea(r11, Operand(rsi, rdi, times_1, 0));
- __ addq(rbx, r9); // End of capture
- // ---------------------
- // r11 - current input character address
- // r9 - current capture character address
- // rbx - end of capture
-
- Label loop;
- __ bind(&loop);
- __ movzxbl(rdx, Operand(r9, 0));
- __ movzxbl(rax, Operand(r11, 0));
- // al - input character
- // dl - capture character
- __ cmpb(rax, rdx);
- __ j(equal, &loop_increment);
-
- // Mismatch, try case-insensitive match (converting letters to lower-case).
- // I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
- // a match.
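- // For example (illustrative values): 'K' (0x4b) and 'k' (0x6b) both become
- // 0x6b after or-ing with 0x20, and 0x6b - 'a' = 10 is within 'z' - 'a' = 25,
- // so they match. '[' (0x5b) and '{' (0x7b) also collapse to the same value,
- // but 0x7b - 'a' = 26 is above 25, so the pair is not accepted as matching
- // letters.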
- __ or_(rax, Immediate(0x20)); // Convert match character to lower-case.
- __ or_(rdx, Immediate(0x20)); // Convert capture character to lower-case.
- __ cmpb(rax, rdx);
- __ j(not_equal, on_no_match); // Definitely not equal.
- __ subb(rax, Immediate('a'));
- __ cmpb(rax, Immediate('z' - 'a'));
-#ifndef ENABLE_LATIN_1
- __ j(above, on_no_match); // Weren't letters anyway.
-#else
- __ j(below_equal, &loop_increment); // In range 'a'-'z'.
- // Latin-1: Check for values in range [224,254] but not 247.
- __ subb(rax, Immediate(224 - 'a'));
- __ cmpb(rax, Immediate(254 - 224));
- __ j(above, on_no_match); // Weren't Latin-1 letters.
- __ cmpb(rax, Immediate(247 - 224)); // Check for 247.
- __ j(equal, on_no_match);
-#endif
- __ bind(&loop_increment);
- // Increment pointers into match and capture strings.
- __ addq(r11, Immediate(1));
- __ addq(r9, Immediate(1));
- // Compare to end of capture, and loop if not done.
- __ cmpq(r9, rbx);
- __ j(below, &loop);
-
- // Compute new value of character position after the matched part.
- __ movq(rdi, r11);
- __ subq(rdi, rsi);
- } else {
- ASSERT(mode_ == UC16);
- // Save important/volatile registers before calling C function.
-#ifndef _WIN64
- // Caller save on Linux and callee save in Windows.
- __ push(rsi);
- __ push(rdi);
-#endif
- __ push(backtrack_stackpointer());
-
- static const int num_arguments = 4;
- __ PrepareCallCFunction(num_arguments);
-
- // Put arguments into parameter registers. Parameters are
- // Address byte_offset1 - Address captured substring's start.
- // Address byte_offset2 - Address of current character position.
- // size_t byte_length - length of capture in bytes(!)
- // Isolate* isolate
-#ifdef _WIN64
- // Compute and set byte_offset1 (start of capture).
- __ lea(rcx, Operand(rsi, rdx, times_1, 0));
- // Set byte_offset2.
- __ lea(rdx, Operand(rsi, rdi, times_1, 0));
- // Set byte_length.
- __ movq(r8, rbx);
- // Isolate.
- __ LoadAddress(r9, ExternalReference::isolate_address());
-#else // AMD64 calling convention
- // Compute byte_offset2 (current position = rsi+rdi).
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- // Compute and set byte_offset1 (start of capture).
- __ lea(rdi, Operand(rsi, rdx, times_1, 0));
- // Set byte_offset2.
- __ movq(rsi, rax);
- // Set byte_length.
- __ movq(rdx, rbx);
- // Isolate.
- __ LoadAddress(rcx, ExternalReference::isolate_address());
-#endif
-
- { // NOLINT: Can't find a way to open this scope without confusing the
- // linter.
- AllowExternalCallThatCantCauseGC scope(&masm_);
- ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
- __ CallCFunction(compare, num_arguments);
- }
-
- // Restore original values before reacting on result value.
- __ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(backtrack_stackpointer());
-#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
-#endif
-
- // Check if function returned non-zero for success or zero for failure.
- __ testq(rax, rax);
- BranchOrBacktrack(zero, on_no_match);
- // On success, increment position by length of capture.
- // Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
- __ addq(rdi, rbx);
- }
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
- Label fallthrough;
-
- // Find length of back-referenced capture.
- __ movq(rdx, register_location(start_reg));
- __ movq(rax, register_location(start_reg + 1));
- __ subq(rax, rdx); // Length to check.
-
- // Fail on partial or illegal capture (start of capture after end of capture).
- // This must not happen (no back-reference can reference a capture that wasn't
- // closed before in the reg-exp).
- __ Check(greater_equal, "Invalid capture referenced");
-
- // Succeed on empty capture (including non-participating capture)
- __ j(equal, &fallthrough);
-
- // -----------------------
- // rdx - Start of capture
- // rax - length of capture
-
- // Check that there are sufficient characters left in the input.
- __ movl(rbx, rdi);
- __ addl(rbx, rax);
- BranchOrBacktrack(greater, on_no_match);
-
- // Compute pointers to match string and capture string
- __ lea(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
- __ addq(rdx, rsi); // Start of capture.
- __ lea(r9, Operand(rdx, rax, times_1, 0)); // End of capture
-
- // -----------------------
- // rdx - current capture character address.
- // rbx - current input character address.
- // r9 - end of capture (the loop below stops when rdx reaches r9).
-
- Label loop;
- __ bind(&loop);
- if (mode_ == ASCII) {
- __ movzxbl(rax, Operand(rdx, 0));
- __ cmpb(rax, Operand(rbx, 0));
- } else {
- ASSERT(mode_ == UC16);
- __ movzxwl(rax, Operand(rdx, 0));
- __ cmpw(rax, Operand(rbx, 0));
- }
- BranchOrBacktrack(not_equal, on_no_match);
- // Increment pointers into capture and match string.
- __ addq(rbx, Immediate(char_size()));
- __ addq(rdx, Immediate(char_size()));
- // Check if we have reached end of match area.
- __ cmpq(rdx, r9);
- __ j(below, &loop);
-
- // Success.
- // Set current character position to position after match.
- __ movq(rdi, rbx);
- __ subq(rdi, rsi);
-
- __ bind(&fallthrough);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotCharacter(uint32_t c,
- Label* on_not_equal) {
- __ cmpl(current_character(), Immediate(c));
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal) {
- if (c == 0) {
- __ testl(current_character(), Immediate(mask));
- } else {
- __ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
- __ cmpl(rax, Immediate(c));
- }
- BranchOrBacktrack(equal, on_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal) {
- if (c == 0) {
- __ testl(current_character(), Immediate(mask));
- } else {
- __ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
- __ cmpl(rax, Immediate(c));
- }
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
- uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal) {
- ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ lea(rax, Operand(current_character(), -minus));
- __ and_(rax, Immediate(mask));
- __ cmpl(rax, Immediate(c));
- BranchOrBacktrack(not_equal, on_not_equal);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterInRange(
- uc16 from,
- uc16 to,
- Label* on_in_range) {
- __ leal(rax, Operand(current_character(), -from));
- __ cmpl(rax, Immediate(to - from));
- BranchOrBacktrack(below_equal, on_in_range);
-}
-
-
-void RegExpMacroAssemblerX64::CheckCharacterNotInRange(
- uc16 from,
- uc16 to,
- Label* on_not_in_range) {
- __ leal(rax, Operand(current_character(), -from));
- __ cmpl(rax, Immediate(to - from));
- BranchOrBacktrack(above, on_not_in_range);
-}
-
-
-void RegExpMacroAssemblerX64::CheckBitInTable(
- Handle<ByteArray> table,
- Label* on_bit_set) {
- __ Move(rax, table);
- Register index = current_character();
- if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
- __ movq(rbx, current_character());
- __ and_(rbx, Immediate(kTableMask));
- index = rbx;
- }
- __ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize),
- Immediate(0));
- BranchOrBacktrack(not_equal, on_bit_set);
-}
-
-
-bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- // Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check, using the sequence:
- // lea(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
- // cmp(rax, Immediate(max - min))
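- // As a worked example, the ASCII digit check below compiles to
- //   lea rax, [current_character - '0']
- //   cmp rax, '9' - '0'
- // For c = '5' (0x35) the subtraction gives 5 <= 9, a match; for c = '/'
- // (0x2f) it wraps around to 0xffffffff, which is above 9, so characters
- // below the range are rejected by the same single unsigned comparison as
- // characters above it.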
- switch (type) {
- case 's':
- // Match space-characters
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- Label success;
- __ cmpl(current_character(), Immediate(' '));
- __ j(equal, &success);
- // Check range 0x09..0x0d
- __ lea(rax, Operand(current_character(), -'\t'));
- __ cmpl(rax, Immediate('\r' - '\t'));
- BranchOrBacktrack(above, on_no_match);
- __ bind(&success);
- return true;
- }
- return false;
- case 'S':
- // Match non-space characters.
- if (mode_ == ASCII) {
- // ASCII space characters are '\t'..'\r' and ' '.
- __ cmpl(current_character(), Immediate(' '));
- BranchOrBacktrack(equal, on_no_match);
- __ lea(rax, Operand(current_character(), -'\t'));
- __ cmpl(rax, Immediate('\r' - '\t'));
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- }
- return false;
- case 'd':
- // Match ASCII digits ('0'..'9')
- __ lea(rax, Operand(current_character(), -'0'));
- __ cmpl(rax, Immediate('9' - '0'));
- BranchOrBacktrack(above, on_no_match);
- return true;
- case 'D':
- // Match non ASCII-digits
- __ lea(rax, Operand(current_character(), -'0'));
- __ cmpl(rax, Immediate('9' - '0'));
- BranchOrBacktrack(below_equal, on_no_match);
- return true;
- case '.': {
- // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subl(rax, Immediate(0x0b));
- __ cmpl(rax, Immediate(0x0c - 0x0b));
- BranchOrBacktrack(below_equal, on_no_match);
- if (mode_ == UC16) {
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
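- // (The earlier xor with 0x01 swaps 0x2028 and 0x2029 into each other, but
- // both still land in the two-value range tested here.)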
- __ subl(rax, Immediate(0x2028 - 0x0b));
- __ cmpl(rax, Immediate(0x2029 - 0x2028));
- BranchOrBacktrack(below_equal, on_no_match);
- }
- return true;
- }
- case 'n': {
- // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
- // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subl(rax, Immediate(0x0b));
- __ cmpl(rax, Immediate(0x0c - 0x0b));
- if (mode_ == ASCII) {
- BranchOrBacktrack(above, on_no_match);
- } else {
- Label done;
- BranchOrBacktrack(below_equal, &done);
- // Compare original value to 0x2028 and 0x2029, using the already
- // computed (current_char ^ 0x01 - 0x0b). I.e., check for
- // 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subl(rax, Immediate(0x2028 - 0x0b));
- __ cmpl(rax, Immediate(0x2029 - 0x2028));
- BranchOrBacktrack(above, on_no_match);
- __ bind(&done);
- }
- return true;
- }
- case 'w': {
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmpl(current_character(), Immediate('z'));
- BranchOrBacktrack(above, on_no_match);
- }
- __ movq(rbx, ExternalReference::re_word_character_map());
- ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- __ testb(Operand(rbx, current_character(), times_1, 0),
- current_character());
- BranchOrBacktrack(zero, on_no_match);
- return true;
- }
- case 'W': {
- Label done;
- if (mode_ != ASCII) {
- // Table is 128 entries, so all ASCII characters can be tested.
- __ cmpl(current_character(), Immediate('z'));
- __ j(above, &done);
- }
- __ movq(rbx, ExternalReference::re_word_character_map());
- ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- __ testb(Operand(rbx, current_character(), times_1, 0),
- current_character());
- BranchOrBacktrack(not_zero, on_no_match);
- if (mode_ != ASCII) {
- __ bind(&done);
- }
- return true;
- }
-
- case '*':
- // Match any character.
- return true;
- // No custom implementation (yet): s(UC16), S(UC16).
- default:
- return false;
- }
-}
-
-
-void RegExpMacroAssemblerX64::Fail() {
- STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
- if (!global()) {
- __ Set(rax, FAILURE);
- }
- __ jmp(&exit_label_);
-}
-
-
-Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
- Label return_rax;
- // Finalize code - write the entry point code now we know how many
- // registers we need.
- // Entry code:
- __ bind(&entry_label_);
-
- // Tell the system that we have a stack frame. Because the type is MANUAL, no
- // code is generated.
- FrameScope scope(&masm_, StackFrame::MANUAL);
-
- // Actually emit code to start a new stack frame.
- __ push(rbp);
- __ movq(rbp, rsp);
- // Save parameters and callee-save registers. Order here should correspond
- // to order of kBackup_ebx etc.
-#ifdef _WIN64
- // MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
- // Store register parameters in pre-allocated stack slots,
- __ movq(Operand(rbp, kInputString), rcx);
- __ movq(Operand(rbp, kStartIndex), rdx); // Passed as int32 in edx.
- __ movq(Operand(rbp, kInputStart), r8);
- __ movq(Operand(rbp, kInputEnd), r9);
- // Callee-save on Win64.
- __ push(rsi);
- __ push(rdi);
- __ push(rbx);
-#else
- // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
- // Push register parameters on stack for reference.
- ASSERT_EQ(kInputString, -1 * kPointerSize);
- ASSERT_EQ(kStartIndex, -2 * kPointerSize);
- ASSERT_EQ(kInputStart, -3 * kPointerSize);
- ASSERT_EQ(kInputEnd, -4 * kPointerSize);
- ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
- ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize);
- __ push(rdi);
- __ push(rsi);
- __ push(rdx);
- __ push(rcx);
- __ push(r8);
- __ push(r9);
-
- __ push(rbx); // Callee-save
-#endif
-
- __ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
-
- // Check if we have space on the stack for registers.
- Label stack_limit_hit;
- Label stack_ok;
-
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_.isolate());
- __ movq(rcx, rsp);
- __ movq(kScratchRegister, stack_limit);
- __ subq(rcx, Operand(kScratchRegister, 0));
- // Handle it if the stack pointer is already below the stack limit.
- __ j(below_equal, &stack_limit_hit);
- // Check if there is room for the variable number of registers above
- // the stack limit.
- __ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
- __ j(above_equal, &stack_ok);
- // Exit with OutOfMemory exception. There is not enough space on the stack
- // for our working registers.
- __ Set(rax, EXCEPTION);
- __ jmp(&return_rax);
-
- __ bind(&stack_limit_hit);
- __ Move(code_object_pointer(), masm_.CodeObject());
- CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
- __ testq(rax, rax);
- // If returned value is non-zero, we exit with the returned value as result.
- __ j(not_zero, &return_rax);
-
- __ bind(&stack_ok);
-
- // Allocate space on stack for registers.
- __ subq(rsp, Immediate(num_registers_ * kPointerSize));
- // Load string length.
- __ movq(rsi, Operand(rbp, kInputEnd));
- // Load input position.
- __ movq(rdi, Operand(rbp, kInputStart));
- // Set up rdi to be negative offset from string end.
- __ subq(rdi, rsi);
- // Set rax to address of char before start of the string
- // (effectively string position -1).
- __ movq(rbx, Operand(rbp, kStartIndex));
- __ neg(rbx);
- if (mode_ == UC16) {
- __ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
- } else {
- __ lea(rax, Operand(rdi, rbx, times_1, -char_size()));
- }
- // Store this value in a local variable, for use when clearing
- // position registers.
- __ movq(Operand(rbp, kInputStartMinusOne), rax);
-
-#ifdef WIN32
- // Ensure that we have written to each stack page, in order. Skipping a page
- // on Windows can cause segmentation faults. Assuming page size is 4k.
- const int kPageSize = 4096;
- const int kRegistersPerPage = kPageSize / kPointerSize;
- for (int i = num_saved_registers_ + kRegistersPerPage - 1;
- i < num_registers_;
- i += kRegistersPerPage) {
- __ movq(register_location(i), rax); // One write every page.
- }
-#endif // WIN32
-
- // Initialize code object pointer.
- __ Move(code_object_pointer(), masm_.CodeObject());
-
- Label load_char_start_regexp, start_regexp;
- // Load newline if index is at start, previous character otherwise.
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- __ j(not_equal, &load_char_start_regexp, Label::kNear);
- __ Set(current_character(), '\n');
- __ jmp(&start_regexp, Label::kNear);
-
- // Global regexp restarts matching here.
- __ bind(&load_char_start_regexp);
- // Load previous char as initial value of current character register.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&start_regexp);
-
- // Initialize on-stack registers.
- if (num_saved_registers_ > 0) {
- // Fill saved registers with initial value = start offset - 1
- // Fill in stack push order, to avoid accessing across an unwritten
- // page (a problem on Windows).
- if (num_saved_registers_ > 8) {
- __ Set(rcx, kRegisterZero);
- Label init_loop;
- __ bind(&init_loop);
- __ movq(Operand(rbp, rcx, times_1, 0), rax);
- __ subq(rcx, Immediate(kPointerSize));
- __ cmpq(rcx,
- Immediate(kRegisterZero - num_saved_registers_ * kPointerSize));
- __ j(greater, &init_loop);
- } else { // Unroll the loop.
- for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(register_location(i), rax);
- }
- }
- }
-
- // Initialize backtrack stack pointer.
- __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
-
- __ jmp(&start_label_);
-
- // Exit code:
- if (success_label_.is_linked()) {
- // Save captures when successful.
- __ bind(&success_label_);
- if (num_saved_registers_ > 0) {
- // copy captures to output
- __ movq(rdx, Operand(rbp, kStartIndex));
- __ movq(rbx, Operand(rbp, kRegisterOutput));
- __ movq(rcx, Operand(rbp, kInputEnd));
- __ subq(rcx, Operand(rbp, kInputStart));
- if (mode_ == UC16) {
- __ lea(rcx, Operand(rcx, rdx, times_2, 0));
- } else {
- __ addq(rcx, rdx);
- }
- for (int i = 0; i < num_saved_registers_; i++) {
- __ movq(rax, register_location(i));
- if (i == 0 && global_with_zero_length_check()) {
- // Keep capture start in rdx for the zero-length check later.
- __ movq(rdx, rax);
- }
- __ addq(rax, rcx); // Convert to index from start, not end.
- if (mode_ == UC16) {
- __ sar(rax, Immediate(1)); // Convert byte index to character index.
- }
- __ movl(Operand(rbx, i * kIntSize), rax);
- }
- }
-
- if (global()) {
- // Restart matching if the regular expression is flagged as global.
- // Increment success counter.
- __ incq(Operand(rbp, kSuccessfulCaptures));
- // Capture results have been stored, so the number of remaining global
- // output registers is reduced by the number of stored captures.
- __ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
- __ subq(rcx, Immediate(num_saved_registers_));
- // Check whether we have enough room for another set of capture results.
- __ cmpq(rcx, Immediate(num_saved_registers_));
- __ j(less, &exit_label_);
-
- __ movq(Operand(rbp, kNumOutputRegisters), rcx);
- // Advance the location for output.
- __ addq(Operand(rbp, kRegisterOutput),
- Immediate(num_saved_registers_ * kIntSize));
-
- // Prepare rax to initialize registers with its value in the next run.
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
-
- if (global_with_zero_length_check()) {
- // Special case for zero-length matches.
- // rdx: capture start index
- __ cmpq(rdi, rdx);
- // Not a zero-length match, restart.
- __ j(not_equal, &load_char_start_regexp);
- // rdi (offset from the end) is zero if we already reached the end.
- __ testq(rdi, rdi);
- __ j(zero, &exit_label_, Label::kNear);
- // Advance current position after a zero-length match.
- if (mode_ == UC16) {
- __ addq(rdi, Immediate(2));
- } else {
- __ incq(rdi);
- }
- }
-
- __ jmp(&load_char_start_regexp);
- } else {
- __ movq(rax, Immediate(SUCCESS));
- }
- }
-
- __ bind(&exit_label_);
- if (global()) {
- // Return the number of successful captures.
- __ movq(rax, Operand(rbp, kSuccessfulCaptures));
- }
-
- __ bind(&return_rax);
-#ifdef _WIN64
- // Restore callee save registers.
- __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rsi);
- // Stack now at rbp.
-#else
- // Restore callee save register.
- __ movq(rbx, Operand(rbp, kBackup_rbx));
- // Skip rsp to rbp.
- __ movq(rsp, rbp);
-#endif
- // Exit function frame, restore previous one.
- __ pop(rbp);
- __ ret(0);
-
- // Backtrack code (branch target for conditional backtracks).
- if (backtrack_label_.is_linked()) {
- __ bind(&backtrack_label_);
- Backtrack();
- }
-
- Label exit_with_exception;
-
- // Preempt-code
- if (check_preempt_label_.is_linked()) {
- SafeCallTarget(&check_preempt_label_);
-
- __ push(backtrack_stackpointer());
- __ push(rdi);
-
- CallCheckStackGuardState();
- __ testq(rax, rax);
- // If returning non-zero, we should end execution with the given
- // result as return value.
- __ j(not_zero, &return_rax);
-
- // Restore registers.
- __ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(rdi);
- __ pop(backtrack_stackpointer());
- // String might have moved: Reload rsi from frame.
- __ movq(rsi, Operand(rbp, kInputEnd));
- SafeReturn();
- }
-
- // Backtrack stack overflow code.
- if (stack_overflow_label_.is_linked()) {
- SafeCallTarget(&stack_overflow_label_);
- // Reached if the backtrack-stack limit has been hit.
-
- Label grow_failed;
- // Save registers before calling C function
-#ifndef _WIN64
- // Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
- __ push(rsi);
- __ push(rdi);
-#endif
-
- // Call GrowStack(backtrack_stackpointer())
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments);
-#ifdef _WIN64
- // Microsoft passes parameters in rcx, rdx, r8.
- // First argument, backtrack stackpointer, is already in rcx.
- __ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
- __ LoadAddress(r8, ExternalReference::isolate_address());
-#else
- // AMD64 ABI passes parameters in rdi, rsi, rdx.
- __ movq(rdi, backtrack_stackpointer()); // First argument.
- __ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
- __ LoadAddress(rdx, ExternalReference::isolate_address());
-#endif
- ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_.isolate());
- __ CallCFunction(grow_stack, num_arguments);
- // If the call returned NULL, we have failed to grow the stack, and
- // must exit with a stack-overflow exception.
- __ testq(rax, rax);
- __ j(equal, &exit_with_exception);
- // Otherwise use return value as new stack pointer.
- __ movq(backtrack_stackpointer(), rax);
- // Restore saved registers and continue.
- __ Move(code_object_pointer(), masm_.CodeObject());
-#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
-#endif
- SafeReturn();
- }
-
- if (exit_with_exception.is_linked()) {
- // If any of the code above needed to exit with an exception.
- __ bind(&exit_with_exception);
- // Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ Set(rax, EXCEPTION);
- __ jmp(&return_rax);
- }
-
- FixupCodeRelativePositions();
-
- CodeDesc code_desc;
- masm_.GetCode(&code_desc);
- Isolate* isolate = ISOLATE;
- Handle<Code> code = isolate->factory()->NewCode(
- code_desc, Code::ComputeFlags(Code::REGEXP),
- masm_.CodeObject());
- PROFILE(isolate, RegExpCodeCreateEvent(*code, *source));
- return Handle<HeapObject>::cast(code);
-}
-
-
-void RegExpMacroAssemblerX64::GoTo(Label* to) {
- BranchOrBacktrack(no_condition, to);
-}
-
-
-void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
- int comparand,
- Label* if_ge) {
- __ cmpq(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(greater_equal, if_ge);
-}
-
-
-void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
- int comparand,
- Label* if_lt) {
- __ cmpq(register_location(reg), Immediate(comparand));
- BranchOrBacktrack(less, if_lt);
-}
-
-
-void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
- Label* if_eq) {
- __ cmpq(rdi, register_location(reg));
- BranchOrBacktrack(equal, if_eq);
-}
-
-
-RegExpMacroAssembler::IrregexpImplementation
- RegExpMacroAssemblerX64::Implementation() {
- return kX64Implementation;
-}
-
-
-void RegExpMacroAssemblerX64::LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds,
- int characters) {
- ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
- ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
- }
- LoadCurrentCharacterUnchecked(cp_offset, characters);
-}
-
-
-void RegExpMacroAssemblerX64::PopCurrentPosition() {
- Pop(rdi);
-}
-
-
-void RegExpMacroAssemblerX64::PopRegister(int register_index) {
- Pop(rax);
- __ movq(register_location(register_index), rax);
-}
-
-
-void RegExpMacroAssemblerX64::PushBacktrack(Label* label) {
- Push(label);
- CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerX64::PushCurrentPosition() {
- Push(rdi);
-}
-
-
-void RegExpMacroAssemblerX64::PushRegister(int register_index,
- StackCheckFlag check_stack_limit) {
- __ movq(rax, register_location(register_index));
- Push(rax);
- if (check_stack_limit) CheckStackLimit();
-}
-
-
-void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
- __ movq(rdi, register_location(reg));
-}
-
-
-void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
- __ movq(backtrack_stackpointer(), register_location(reg));
- __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
-}
-
-
-void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
- Label after_position;
- __ cmpq(rdi, Immediate(-by * char_size()));
- __ j(greater_equal, &after_position, Label::kNear);
- __ movq(rdi, Immediate(-by * char_size()));
- // On RegExp code entry (where this operation is used), the character before
- // the current position is expected to be already loaded.
- // We have advanced the position, so it's safe to read backwards.
- LoadCurrentCharacterUnchecked(-1, 1);
- __ bind(&after_position);
-}
-
-
-void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
- ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
- __ movq(register_location(register_index), Immediate(to));
-}
-
-
-bool RegExpMacroAssemblerX64::Succeed() {
- __ jmp(&success_label_);
- return global();
-}
-
-
-void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
- int cp_offset) {
- if (cp_offset == 0) {
- __ movq(register_location(reg), rdi);
- } else {
- __ lea(rax, Operand(rdi, cp_offset * char_size()));
- __ movq(register_location(reg), rax);
- }
-}
-
-
-void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
- ASSERT(reg_from <= reg_to);
- __ movq(rax, Operand(rbp, kInputStartMinusOne));
- for (int reg = reg_from; reg <= reg_to; reg++) {
- __ movq(register_location(reg), rax);
- }
-}
-
-
-void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
- __ movq(rax, backtrack_stackpointer());
- __ subq(rax, Operand(rbp, kStackHighEnd));
- __ movq(register_location(reg), rax);
-}
-
-
-// Private methods:
-
-void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
- // This function call preserves no register values. The caller should save
- // anything that is volatile across a C call or overwritten by this function.
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments);
-#ifdef _WIN64
- // Second argument: Code* of self. (Do this before overwriting r8).
- __ movq(rdx, code_object_pointer());
- // Third argument: RegExp code frame pointer.
- __ movq(r8, rbp);
- // First argument: Next address on the stack (will be address of
- // return address).
- __ lea(rcx, Operand(rsp, -kPointerSize));
-#else
- // Third argument: RegExp code frame pointer.
- __ movq(rdx, rbp);
- // Second argument: Code* of self.
- __ movq(rsi, code_object_pointer());
- // First argument: Next address on the stack (will be address of
- // return address).
- __ lea(rdi, Operand(rsp, -kPointerSize));
-#endif
- ExternalReference stack_check =
- ExternalReference::re_check_stack_guard_state(masm_.isolate());
- __ CallCFunction(stack_check, num_arguments);
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
- return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
-}
-
-
-int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame) {
- Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
- ASSERT(isolate == Isolate::Current());
- if (isolate->stack_guard()->IsStackOverflow()) {
- isolate->StackOverflow();
- return EXCEPTION;
- }
-
- // If this is not a real stack overflow, the stack guard was used to
- // interrupt execution for another purpose.
-
- // If this is a direct call from JavaScript retry the RegExp forcing the call
- // through the runtime system. Currently the direct call cannot handle a GC.
- if (frame_entry<int>(re_frame, kDirectCall) == 1) {
- return RETRY;
- }
-
- // Prepare for possible GC.
- HandleScope handles(isolate);
- Handle<Code> code_handle(re_code);
-
- Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
-
- // Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
-
- ASSERT(re_code->instruction_start() <= *return_address);
- ASSERT(*return_address <=
- re_code->instruction_start() + re_code->instruction_size());
-
- MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
-
- if (*code_handle != re_code) { // Return address no longer valid
- intptr_t delta = code_handle->address() - re_code->address();
- // Overwrite the return address on the stack.
- *return_address += delta;
- }
-
- if (result->IsException()) {
- return EXCEPTION;
- }
-
- Handle<String> subject_tmp = subject;
- int slice_offset = 0;
-
- // Extract the underlying string and the slice offset.
- if (StringShape(*subject_tmp).IsCons()) {
- subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
- } else if (StringShape(*subject_tmp).IsSliced()) {
- SlicedString* slice = SlicedString::cast(*subject_tmp);
- subject_tmp = Handle<String>(slice->parent());
- slice_offset = slice->offset();
- }
-
- // String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
- // If we changed between an ASCII and a UC16 string, the specialized
- // code cannot be used, and we need to restart regexp matching from
- // scratch (including, potentially, compiling a new version of the code).
- return RETRY;
- }
-
- // Otherwise, the content of the string might have moved. It must still
- // be a sequential or external string with the same content.
- // Update the start and end pointers in the stack frame to the current
- // location (whether it has actually moved or not).
- ASSERT(StringShape(*subject_tmp).IsSequential() ||
- StringShape(*subject_tmp).IsExternal());
-
- // The original start address of the characters to match.
- const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
-
- // Find the current start address of the same character at the current string
- // position.
- int start_index = frame_entry<int>(re_frame, kStartIndex);
- const byte* new_address = StringCharacterPosition(*subject_tmp,
- start_index + slice_offset);
-
- if (start_address != new_address) {
- // If there is a difference, update the object pointer and start and end
- // addresses in the RegExp stack frame to match the new value.
- const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
- int byte_length = static_cast<int>(end_address - start_address);
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- frame_entry<const byte*>(re_frame, kInputStart) = new_address;
- frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
- } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
- // Subject string might have been a ConsString that underwent
- // short-circuiting during GC. That will not change start_address but
- // will change pointer inside the subject handle.
- frame_entry<const String*>(re_frame, kInputString) = *subject;
- }
-
- return 0;
-}
-
-
-Operand RegExpMacroAssemblerX64::register_location(int register_index) {
- ASSERT(register_index < (1<<30));
- if (num_registers_ <= register_index) {
- num_registers_ = register_index + 1;
- }
- return Operand(rbp, kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerX64::CheckPosition(int cp_offset,
- Label* on_outside_input) {
- __ cmpl(rdi, Immediate(-cp_offset * char_size()));
- BranchOrBacktrack(greater_equal, on_outside_input);
-}
-
-
-void RegExpMacroAssemblerX64::BranchOrBacktrack(Condition condition,
- Label* to) {
- if (condition < 0) { // No condition
- if (to == NULL) {
- Backtrack();
- return;
- }
- __ jmp(to);
- return;
- }
- if (to == NULL) {
- __ j(condition, &backtrack_label_);
- return;
- }
- __ j(condition, to);
-}
-
-
-void RegExpMacroAssemblerX64::SafeCall(Label* to) {
- __ call(to);
-}
-
-
-void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
- __ bind(label);
- __ subq(Operand(rsp, 0), code_object_pointer());
-}
-
-
-void RegExpMacroAssemblerX64::SafeReturn() {
- __ addq(Operand(rsp, 0), code_object_pointer());
- __ ret(0);
-}
-
-
-void RegExpMacroAssemblerX64::Push(Register source) {
- ASSERT(!source.is(backtrack_stackpointer()));
- // Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
- __ movl(Operand(backtrack_stackpointer(), 0), source);
-}
-
-
-void RegExpMacroAssemblerX64::Push(Immediate value) {
- // Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
- __ movl(Operand(backtrack_stackpointer(), 0), value);
-}
-
-
-void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
- for (int i = 0, n = code_relative_fixup_positions_.length(); i < n; i++) {
- int position = code_relative_fixup_positions_[i];
- // The position succeeds a relative label offset from position.
- // Patch the relative offset to be relative to the Code object pointer
- // instead.
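- // That is: the assembler stored (label_pos - position) in the 32 bits just
- // before position. Adding position turns that back into label_pos, the
- // label's offset from the start of the instructions, and adding
- // Code::kHeaderSize - kHeapObjectTag makes it an offset from the tagged
- // Code object pointer, which Backtrack() adds back via code_object_pointer()
- // before jumping.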
- int patch_position = position - kIntSize;
- int offset = masm_.long_at(patch_position);
- masm_.long_at_put(patch_position,
- offset
- + position
- + Code::kHeaderSize
- - kHeapObjectTag);
- }
- code_relative_fixup_positions_.Clear();
-}
-
-
-void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
- __ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
- MarkPositionForCodeRelativeFixup();
-}
-
-
-void RegExpMacroAssemblerX64::Pop(Register target) {
- ASSERT(!target.is(backtrack_stackpointer()));
- __ movsxlq(target, Operand(backtrack_stackpointer(), 0));
- // Notice: This updates flags, unlike normal Pop.
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
-}
-
-
-void RegExpMacroAssemblerX64::Drop() {
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
-}
-
-
-void RegExpMacroAssemblerX64::CheckPreemption() {
- // Check for preemption.
- Label no_preempt;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_.isolate());
- __ load_rax(stack_limit);
- __ cmpq(rsp, rax);
- __ j(above, &no_preempt);
-
- SafeCall(&check_preempt_label_);
-
- __ bind(&no_preempt);
-}
-
-
-void RegExpMacroAssemblerX64::CheckStackLimit() {
- Label no_stack_overflow;
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_.isolate());
- __ load_rax(stack_limit);
- __ cmpq(backtrack_stackpointer(), rax);
- __ j(above, &no_stack_overflow);
-
- SafeCall(&stack_overflow_label_);
-
- __ bind(&no_stack_overflow);
-}
-
-
-void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
- int characters) {
- if (mode_ == ASCII) {
- if (characters == 4) {
- __ movl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
- } else if (characters == 2) {
- __ movzxwl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
- } else {
- ASSERT(characters == 1);
- __ movzxbl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
- }
- } else {
- ASSERT(mode_ == UC16);
- if (characters == 2) {
- __ movl(current_character(),
- Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
- } else {
- ASSERT(characters == 1);
- __ movzxwl(current_character(),
- Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
- }
- }
-}
-
-#undef __
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h b/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h
deleted file mode 100644
index a082cf2..0000000
--- a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.h
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
-#define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
-
-#include "x64/assembler-x64.h"
-#include "x64/assembler-x64-inl.h"
-#include "macro-assembler.h"
-#include "code.h"
-#include "x64/macro-assembler-x64.h"
-
-namespace v8 {
-namespace internal {
-
-#ifndef V8_INTERPRETED_REGEXP
-
-class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
- public:
- RegExpMacroAssemblerX64(Mode mode, int registers_to_save, Zone* zone);
- virtual ~RegExpMacroAssemblerX64();
- virtual int stack_limit_slack();
- virtual void AdvanceCurrentPosition(int by);
- virtual void AdvanceRegister(int reg, int by);
- virtual void Backtrack();
- virtual void Bind(Label* label);
- virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckCharacter(uint32_t c, Label* on_equal);
- virtual void CheckCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_equal);
- virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
- virtual void CheckCharacterLT(uc16 limit, Label* on_less);
- virtual void CheckCharacters(Vector<const uc16> str,
- int cp_offset,
- Label* on_failure,
- bool check_end_of_string);
- // A "greedy loop" is a loop that is both greedy and with a simple
- // body. It has a particularly simple implementation.
- virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
- virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- Label* on_no_match);
- virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
- virtual void CheckNotCharacterAfterAnd(uint32_t c,
- uint32_t mask,
- Label* on_not_equal);
- virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
- uc16 minus,
- uc16 mask,
- Label* on_not_equal);
- virtual void CheckCharacterInRange(uc16 from,
- uc16 to,
- Label* on_in_range);
- virtual void CheckCharacterNotInRange(uc16 from,
- uc16 to,
- Label* on_not_in_range);
- virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
-
- // Checks whether the given offset from the current position is before
- // the end of the string.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match);
- virtual void Fail();
- virtual Handle<HeapObject> GetCode(Handle<String> source);
- virtual void GoTo(Label* label);
- virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
- virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
- virtual void IfRegisterEqPos(int reg, Label* if_eq);
- virtual IrregexpImplementation Implementation();
- virtual void LoadCurrentCharacter(int cp_offset,
- Label* on_end_of_input,
- bool check_bounds = true,
- int characters = 1);
- virtual void PopCurrentPosition();
- virtual void PopRegister(int register_index);
- virtual void PushBacktrack(Label* label);
- virtual void PushCurrentPosition();
- virtual void PushRegister(int register_index,
- StackCheckFlag check_stack_limit);
- virtual void ReadCurrentPositionFromRegister(int reg);
- virtual void ReadStackPointerFromRegister(int reg);
- virtual void SetCurrentPositionFromEnd(int by);
- virtual void SetRegister(int register_index, int to);
- virtual bool Succeed();
- virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
- virtual void ClearRegisters(int reg_from, int reg_to);
- virtual void WriteStackPointerToRegister(int reg);
-
- static Result Match(Handle<Code> regexp,
- Handle<String> subject,
- int* offsets_vector,
- int offsets_vector_length,
- int previous_index,
- Isolate* isolate);
-
- static Result Execute(Code* code,
- String* input,
- int start_offset,
- const byte* input_start,
- const byte* input_end,
- int* output,
- bool at_start);
-
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
- private:
- // Offsets from rbp of function parameters and stored registers.
- static const int kFramePointer = 0;
- // Above the frame pointer - function parameters and return address.
- static const int kReturn_eip = kFramePointer + kPointerSize;
- static const int kFrameAlign = kReturn_eip + kPointerSize;
-
-#ifdef _WIN64
- // Parameters (first four passed as registers, but with room on stack).
- // In Microsoft 64-bit Calling Convention, there is room on the caller's
- // stack (before the return address) to spill parameter registers. We
- // use this space to store the register passed parameters.
- static const int kInputString = kFrameAlign;
- // StartIndex is passed as 32 bit int.
- static const int kStartIndex = kInputString + kPointerSize;
- static const int kInputStart = kStartIndex + kPointerSize;
- static const int kInputEnd = kInputStart + kPointerSize;
- static const int kRegisterOutput = kInputEnd + kPointerSize;
- // For the case of global regular expression, we have room to store at least
- // one set of capture results. For the case of non-global regexp, we ignore
- // this value. NumOutputRegisters is passed as a 32-bit value. The upper
- // 32 bits of this 64-bit stack slot may contain garbage.
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- // DirectCall is passed as 32 bit int (values 0 or 1).
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-#else
- // In AMD64 ABI Calling Convention, the first six integer parameters
- // are passed as registers, and caller must allocate space on the stack
- // if it wants them stored. We push the parameters after the frame pointer.
- static const int kInputString = kFramePointer - kPointerSize;
- static const int kStartIndex = kInputString - kPointerSize;
- static const int kInputStart = kStartIndex - kPointerSize;
- static const int kInputEnd = kInputStart - kPointerSize;
- static const int kRegisterOutput = kInputEnd - kPointerSize;
- // For the case of global regular expression, we have room to store at least
- // one set of capture results. For the case of non-global regexp, we ignore
- // this value.
- static const int kNumOutputRegisters = kRegisterOutput - kPointerSize;
- static const int kStackHighEnd = kFrameAlign;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-#endif
-
-#ifdef _WIN64
- // Microsoft calling convention has three callee-saved registers
- // (that we are using). We push these after the frame pointer.
- static const int kBackup_rsi = kFramePointer - kPointerSize;
- static const int kBackup_rdi = kBackup_rsi - kPointerSize;
- static const int kBackup_rbx = kBackup_rdi - kPointerSize;
- static const int kLastCalleeSaveRegister = kBackup_rbx;
-#else
- // AMD64 Calling Convention has only one callee-save register that
- // we use. We push this after the frame pointer (and after the
- // parameters).
- static const int kBackup_rbx = kNumOutputRegisters - kPointerSize;
- static const int kLastCalleeSaveRegister = kBackup_rbx;
-#endif
-
- static const int kSuccessfulCaptures = kLastCalleeSaveRegister - kPointerSize;
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
-
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
-
- // Initial size of code buffer.
- static const size_t kRegExpCodeSize = 1024;
-
- // Load a number of characters at the given offset from the
- // current position, into the current-character register.
- void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
-
- // Check whether preemption has been requested.
- void CheckPreemption();
-
- // Check whether we are exceeding the stack limit on the backtrack stack.
- void CheckStackLimit();
-
- // Generate a call to CheckStackGuardState.
- void CallCheckStackGuardState();
-
- // The rbp-relative location of a regexp register.
- Operand register_location(int register_index);
-
- // The register containing the current character after LoadCurrentCharacter.
- inline Register current_character() { return rdx; }
-
- // The register containing the backtrack stack top. Provides a meaningful
- // name to the register.
- inline Register backtrack_stackpointer() { return rcx; }
-
- // The register containing a self pointer to this code's Code object.
- inline Register code_object_pointer() { return r8; }
-
- // Byte size of chars in the string to match (decided by the Mode argument)
- inline int char_size() { return static_cast<int>(mode_); }
-
- // Equivalent to a conditional branch to the label, unless the label
- // is NULL, in which case it is a conditional Backtrack.
- void BranchOrBacktrack(Condition condition, Label* to);
-
- void MarkPositionForCodeRelativeFixup() {
- code_relative_fixup_positions_.Add(masm_.pc_offset(), zone());
- }
-
- void FixupCodeRelativePositions();
-
- // Call and return internally in the generated code in a way that
- // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
- inline void SafeCall(Label* to);
- inline void SafeCallTarget(Label* label);
- inline void SafeReturn();
-
- // Pushes the value of a register on the backtrack stack. Decrements the
- // stack pointer (rcx) by a word size and stores the register's value there.
- inline void Push(Register source);
-
- // Pushes a value on the backtrack stack. Decrements the stack pointer (rcx)
- // by a word size and stores the value there.
- inline void Push(Immediate value);
-
- // Pushes the Code object relative offset of a label on the backtrack stack
- // (i.e., a backtrack target). Decrements the stack pointer (rcx)
- // by a word size and stores the value there.
- inline void Push(Label* label);
-
- // Pops a value from the backtrack stack. Reads the word at the stack pointer
- // (rcx) and increments it by a word size.
- inline void Pop(Register target);
-
- // Drops the top value from the backtrack stack without reading it.
- // Increments the stack pointer (rcx) by a word size.
- inline void Drop();
-
- MacroAssembler masm_;
- MacroAssembler::NoRootArrayScope no_root_array_scope_;
-
- ZoneList<int> code_relative_fixup_positions_;
-
- // Which mode to generate code for (ASCII or UC16).
- Mode mode_;
-
- // One greater than maximal register index actually used.
- int num_registers_;
-
- // Number of registers to output at the end (the saved registers
- // are always 0..num_saved_registers_-1)
- int num_saved_registers_;
-
- // Labels used internally.
- Label entry_label_;
- Label start_label_;
- Label success_label_;
- Label backtrack_label_;
- Label exit_label_;
- Label check_preempt_label_;
- Label stack_overflow_label_;
-};
-
-#endif // V8_INTERPRETED_REGEXP
-
-}} // namespace v8::internal
-
-#endif // V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
diff --git a/src/3rdparty/v8/src/x64/simulator-x64.cc b/src/3rdparty/v8/src/x64/simulator-x64.cc
deleted file mode 100644
index 209aa2d..0000000
--- a/src/3rdparty/v8/src/x64/simulator-x64.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/src/3rdparty/v8/src/x64/simulator-x64.h b/src/3rdparty/v8/src/x64/simulator-x64.h
deleted file mode 100644
index 8aba701..0000000
--- a/src/3rdparty/v8/src/x64/simulator-x64.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_SIMULATOR_X64_H_
-#define V8_X64_SIMULATOR_X64_H_
-
-#include "allocation.h"
-
-namespace v8 {
-namespace internal {
-
- // Since there is no simulator for the x64 architecture, the only thing we can
-// do is to call the entry directly.
-// TODO(X64): Don't pass p0, since it isn't used?
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- (entry(p0, p1, p2, p3, p4))
-
-typedef int (*regexp_matcher)(String*, int, const byte*,
- const byte*, int*, int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address should
- // expect nine int/pointer-sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- (reinterpret_cast<TryCatch*>(try_catch_address))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on x64 uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
- static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
- uintptr_t c_limit) {
- return c_limit;
- }
-
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- return try_catch_address;
- }
-
- static inline void UnregisterCTryCatch() { }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_X64_SIMULATOR_X64_H_
diff --git a/src/3rdparty/v8/src/x64/stub-cache-x64.cc b/src/3rdparty/v8/src/x64/stub-cache-x64.cc
deleted file mode 100644
index c471569..0000000
--- a/src/3rdparty/v8/src/x64/stub-cache-x64.cc
+++ /dev/null
@@ -1,3613 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "ic-inl.h"
-#include "codegen.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
- MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register receiver,
- Register name,
- // The offset is scaled by 4, based on
- // kHeapObjectTagSize, which is two bits
- Register offset) {
- // We need to scale up the pointer by 2 because the offset is scaled by less
- // than the pointer size.
- ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
- ScaleFactor scale_factor = times_2;
-
- ASSERT_EQ(24, sizeof(StubCache::Entry));
- // The offset register holds the entry offset times four (due to masking
- // and shifting optimizations).
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- Label miss;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
-
- __ LoadAddress(kScratchRegister, key_offset);
-
- // Check that the key in the entry matches the name.
- // Multiply entry offset by 16 to get the entry address. Since the
- // offset register already holds the entry offset times four, multiply
- // by a further four.
- __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0));
- __ j(not_equal, &miss);
-
- // Get the map entry from the cache.
- // Use key_offset + kPointerSize * 2, rather than loading map_offset.
- __ movq(kScratchRegister,
- Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
- __ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Get the code entry from the cache.
- __ LoadAddress(kScratchRegister, value_offset);
- __ movq(kScratchRegister,
- Operand(kScratchRegister, offset, scale_factor, 0));
-
- // Check that the flags match what we're looking for.
- __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
- __ cmpl(offset, Immediate(flags));
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(kScratchRegister);
-
- __ bind(&miss);
-}
-
-
-// Helper function used to check that the dictionary doesn't contain
-// the property. This function may return false negatives, so miss_label
-// must always call a backup property check that is complete.
-// This function is safe to call if the receiver has fast properties.
-// Name must be an internalized string and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
- Label* miss_label,
- Register receiver,
- Handle<String> name,
- Register r0,
- Register r1) {
- ASSERT(name->IsInternalizedString());
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->negative_lookups(), 1);
- __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
- __ movq(r0, FieldOperand(receiver, HeapObject::kMapOffset));
-
- const int kInterceptorOrAccessCheckNeededMask =
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
- // Bail out if the receiver has a named interceptor or requires access checks.
- __ testb(FieldOperand(r0, Map::kBitFieldOffset),
- Immediate(kInterceptorOrAccessCheckNeededMask));
- __ j(not_zero, miss_label);
-
- // Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
- __ j(below, miss_label);
-
- // Load properties array.
- Register properties = r0;
- __ movq(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Check that the properties array is a dictionary.
- __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, miss_label);
-
- Label done;
- StringDictionaryLookupStub::GenerateNegativeLookup(masm,
- miss_label,
- &done,
- properties,
- name,
- r1);
- __ bind(&done);
- __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra,
- Register extra2,
- Register extra3) {
- Isolate* isolate = masm->isolate();
- Label miss;
- USE(extra); // The register extra is not used on the X64 platform.
- USE(extra2); // The register extra2 is not used on the X64 platform.
- USE(extra3); // The register extra3 is not used on the X64 platform.
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 24.
- ASSERT(sizeof(Entry) == 24);
-
- // Make sure the flags do not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
-
- // Check that the scratch register is valid; extra2 and extra3 are unused.
- ASSERT(!scratch.is(no_reg));
- ASSERT(extra2.is(no_reg));
- ASSERT(extra3.is(no_reg));
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
- // Use only the low 32 bits of the map pointer.
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- // We mask out the last two bits because they are not part of the hash and
- // they are always 01 for maps. Also in the two 'and' instructions below.
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
-
- // Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
-
- // Primary miss: Compute hash for secondary probe.
- __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
- __ subl(scratch, name);
- __ addl(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
-
- // Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
-
- // Cache miss: fall through and let the caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- // Load the global or builtins object from the current context.
- __ movq(prototype,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- __ movq(prototype,
- FieldOperand(prototype, GlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ movq(prototype,
- FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss) {
- Isolate* isolate = masm->isolate();
- // Check we're still in the same context.
- __ Move(prototype, isolate->global_object());
- __ cmpq(Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
- prototype);
- __ j(not_equal, miss);
- // Get the global function with the given index.
- Handle<JSFunction> function(
- JSFunction::cast(isolate->native_context()->get(index)));
- // Load its initial map. The global functions all have initial maps.
- __ Move(prototype, Handle<Map>(function->initial_map()));
- // Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss_label);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, miss_label);
-
- // Load length directly from the JS array.
- __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset));
- __ ret(0);
-}
-
-
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ testl(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss,
- bool support_wrappers) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
-
- // Load length directly from the string.
- __ movq(rax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
- Register receiver,
- Register result,
- Register scratch,
- Label* miss_label) {
- __ TryGetFunctionPrototype(receiver, result, miss_label);
- if (!result.is(rax)) __ movq(rax, result);
- __ ret(0);
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
- // are loaded directly; otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- Handle<JSObject> holder,
- PropertyIndex index) {
- DoGenerateFastPropertyLoad(
- masm, dst, src, index.is_inobject(holder), index.translate(holder));
-}
-
-
-void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst,
- Register src,
- bool inobject,
- int index) {
- int offset = index * kPointerSize;
- if (!inobject) {
- // Calculate the offset into the properties array.
- offset = offset + FixedArray::kHeaderSize;
- __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- src = dst;
- }
- __ movq(dst, FieldOperand(src, offset));
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- __ push(name);
- Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
- ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
- __ Move(kScratchRegister, interceptor);
- __ push(kScratchRegister);
- __ push(receiver);
- __ push(holder);
- __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
- __ PushAddress(ExternalReference::isolate_address());
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
- MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register name,
- Handle<JSObject> holder_obj) {
- PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
- masm->isolate());
- __ Set(rax, 6);
- __ LoadAddress(rbx, ref);
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-}
-
-
-// Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 4;
-
-
- // Reserves space for the extra arguments to the API function in the
-// caller's frame.
-//
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
-static void ReserveSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : last argument in the internal frame of the caller
- // -----------------------------------
- __ movq(scratch, Operand(rsp, 0));
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
- __ movq(Operand(rsp, 0), scratch);
- __ Move(scratch, Smi::FromInt(0));
- for (int i = 1; i <= kFastApiCallArguments; i++) {
- __ movq(Operand(rsp, i * kPointerSize), scratch);
- }
-}
-
-
-// Undoes the effects of ReserveSpaceForFastApiCall.
-static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address.
- // -- rsp[8] : last fast api call extra argument.
- // -- ...
- // -- rsp[kFastApiCallArguments * 8] : first fast api call extra argument.
- // -- rsp[kFastApiCallArguments * 8 + 8] : last argument in the internal
- // frame.
- // -----------------------------------
- __ movq(scratch, Operand(rsp, 0));
- __ movq(Operand(rsp, kFastApiCallArguments * kPointerSize), scratch);
- __ addq(rsp, Immediate(kPointerSize * kFastApiCallArguments));
-}
-
-
-// Generates call to API function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- int argc) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : object passing the type check
- // (last fast api call extra argument,
- // set by CheckPrototypes)
- // -- rsp[16] : api function
- // (first fast api call extra argument)
- // -- rsp[24] : api call data
- // -- rsp[32] : isolate
- // -- rsp[40] : last argument
- // -- ...
- // -- rsp[(argc + 4) * 8] : first argument
- // -- rsp[(argc + 5) * 8] : receiver
- // -----------------------------------
- // Get the function and setup the context.
- Handle<JSFunction> function = optimization.constant_function();
- __ LoadHeapObject(rdi, function);
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Pass the additional arguments.
- __ movq(Operand(rsp, 2 * kPointerSize), rdi);
- Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data(), masm->isolate());
- if (masm->isolate()->heap()->InNewSpace(*call_data)) {
- __ Move(rcx, api_call_info);
- __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(Operand(rsp, 3 * kPointerSize), rbx);
- } else {
- __ Move(Operand(rsp, 3 * kPointerSize), call_data);
- }
- __ movq(kScratchRegister, ExternalReference::isolate_address());
- __ movq(Operand(rsp, 4 * kPointerSize), kScratchRegister);
-
- // Prepare arguments.
- __ lea(rbx, Operand(rsp, 4 * kPointerSize));
-
-#if defined(__MINGW64__)
- Register arguments_arg = rcx;
-#elif defined(_WIN64)
- // Win64 uses the first register (rcx) for the returned value.
- Register arguments_arg = rdx;
-#else
- Register arguments_arg = rdi;
-#endif
-
- // Allocate the v8::Arguments structure in the arguments' space since
- // it's not controlled by GC.
- const int kApiStackSpace = 4;
-
- __ PrepareCallApiFunction(kApiStackSpace);
-
- __ movq(StackSpaceOperand(0), rbx); // v8::Arguments::implicit_args_.
- __ addq(rbx, Immediate(argc * kPointerSize));
- __ movq(StackSpaceOperand(1), rbx); // v8::Arguments::values_.
- __ Set(StackSpaceOperand(2), argc); // v8::Arguments::length_.
- // v8::Arguments::is_construct_call_.
- __ Set(StackSpaceOperand(3), 0);
-
- // v8::InvocationCallback's argument.
- __ lea(arguments_arg, StackSpaceOperand(0));
-
- // Function address is a foreign pointer outside V8's heap.
- Address function_address = v8::ToCData<Address>(api_call_info->callback());
- __ CallApiFunctionAndReturn(function_address,
- argc + kFastApiCallArguments + 1);
-}
-
-
-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
- CallInterceptorCompiler(StubCompiler* stub_compiler,
- const ParameterCount& arguments,
- Register name,
- Code::ExtraICState extra_ic_state)
- : stub_compiler_(stub_compiler),
- arguments_(arguments),
- name_(name),
- extra_ic_state_(extra_ic_state) {}
-
- void Compile(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- CallOptimization optimization(lookup);
- if (optimization.is_constant_call()) {
- CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
- holder, lookup, name, optimization, miss);
- } else {
- CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
- name, holder, miss);
- }
- }
-
- private:
- void CompileCacheable(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name,
- const CallOptimization& optimization,
- Label* miss_label) {
- ASSERT(optimization.is_constant_call());
- ASSERT(!lookup->holder()->IsGlobalObject());
-
- int depth1 = kInvalidProtoDepth;
- int depth2 = kInvalidProtoDepth;
- bool can_do_fast_api_call = false;
- if (optimization.is_simple_api_call() &&
- !lookup->holder()->IsGlobalObject()) {
- depth1 = optimization.GetPrototypeDepthOfExpectedType(
- object, interceptor_holder);
- if (depth1 == kInvalidProtoDepth) {
- depth2 = optimization.GetPrototypeDepthOfExpectedType(
- interceptor_holder, Handle<JSObject>(lookup->holder()));
- }
- can_do_fast_api_call =
- depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
- }
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->call_const_interceptor(), 1);
-
- if (can_do_fast_api_call) {
- __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1);
- ReserveSpaceForFastApiCall(masm, scratch1);
- }
-
- // Check that the maps from receiver to interceptor's holder
- // haven't changed and thus we can invoke interceptor.
- Label miss_cleanup;
- Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, depth1, miss);
-
- // Invoke an interceptor and if it provides a value,
- // branch to |regular_invoke|.
- Label regular_invoke;
- LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
- &regular_invoke);
-
- // Interceptor returned nothing for this property. Try to use cached
- // constant function.
-
- // Check that the maps from interceptor's holder to constant function's
- // holder haven't changed and thus we can use cached constant function.
- if (*interceptor_holder != lookup->holder()) {
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- Handle<JSObject>(lookup->holder()),
- scratch1, scratch2, scratch3,
- name, depth2, miss);
- } else {
- // CheckPrototypes has a side effect of fetching a 'holder'
- // for the API (the object which is instanceof for the signature). It is
- // safe to omit it here; if present, it should have been fetched
- // by the previous CheckPrototypes.
- ASSERT(depth2 == kInvalidProtoDepth);
- }
-
- // Invoke function.
- if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
- } else {
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION, NullCallWrapper(), call_kind);
- }
-
- // Deferred code for the fast API call case: clean preallocated space.
- if (can_do_fast_api_call) {
- __ bind(&miss_cleanup);
- FreeSpaceForFastApiCall(masm, scratch1);
- __ jmp(miss_label);
- }
-
- // Invoke a regular function.
- __ bind(&regular_invoke);
- if (can_do_fast_api_call) {
- FreeSpaceForFastApiCall(masm, scratch1);
- }
- }
-
- void CompileRegular(MacroAssembler* masm,
- Handle<JSObject> object,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Handle<JSObject> interceptor_holder,
- Label* miss_label) {
- Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss_label);
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Save the name_ register across the call.
- __ push(name_);
-
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- __ CallExternalReference(
- ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
- masm->isolate()),
- 6);
-
- // Restore the name_ register.
- __ pop(name_);
-
- // Leave the internal frame.
- }
-
- void LoadWithInterceptor(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Handle<JSObject> holder_obj,
- Label* interceptor_succeeded) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(holder); // Save the holder.
- __ push(name_); // Save the name.
-
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- holder_obj);
-
- __ pop(name_); // Restore the name.
- __ pop(receiver); // Restore the holder.
- // Leave the internal frame.
- }
-
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(not_equal, interceptor_succeeded);
- }
-
- StubCompiler* stub_compiler_;
- const ParameterCount& arguments_;
- Register name_;
- Code::ExtraICState extra_ic_state_;
-};
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Handle<Code> code = (kind == Code::LOAD_IC)
- ? masm->isolate()->builtins()->LoadIC_Miss()
- : masm->isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
- Handle<Code> code = (kind == Code::STORE_IC)
- ? masm->isolate()->builtins()->StoreIC_Miss()
- : masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
- Handle<Code> code =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name,
- Register receiver_reg,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label) {
- LookupResult lookup(masm->isolate());
- object->Lookup(*name, &lookup);
- if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) {
- // In sloppy mode, we could just return the value and be done. However, we
- // might be in strict mode, where we have to throw. Since we cannot tell,
- // go into slow case unconditionally.
- __ jmp(miss_label);
- return;
- }
-
- // Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
- __ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, mode);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
- }
-
- // Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
- JSObject* holder;
- if (lookup.IsFound()) {
- holder = lookup.holder();
- } else {
- // Find the top object.
- holder = *object;
- do {
- holder = JSObject::cast(holder->GetPrototype());
- } while (holder->GetPrototype()->IsJSObject());
- }
- // We need an extra register, so push name_reg to free it up for reuse.
- __ push(name_reg);
- Label miss_pop, done_check;
- CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg,
- scratch1, scratch2, name, &miss_pop);
- __ jmp(&done_check);
- __ bind(&miss_pop);
- __ pop(name_reg);
- __ jmp(miss_label);
- __ bind(&done_check);
- __ pop(name_reg);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ pop(scratch1); // Return address.
- __ push(receiver_reg);
- __ Push(transition);
- __ push(rax);
- __ push(scratch1);
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
- masm->isolate()),
- 3,
- 1);
- return;
- }
-
- if (!transition.is_null()) {
- // Update the map of the object.
- __ Move(scratch1, transition);
- __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
-
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(FieldOperand(receiver_reg, offset), rax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, rax);
- __ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array (optimistically).
- __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch1, offset), rax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, rax);
- __ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
- }
-
- // Return the value (register rax).
- __ ret(0);
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ Move(scratch, cell);
- __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, miss);
-}
-
-
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Register scratch,
- Label* miss) {
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- if (current->IsGlobalObject()) {
- GenerateCheckPropertyCell(masm,
- Handle<GlobalObject>::cast(current),
- name,
- scratch,
- miss);
- }
- current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
- }
-}
-
-#undef __
-#define __ ACCESS_MASM((masm()))
-
-
-void StubCompiler::GenerateTailCall(Handle<Code> code) {
- __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Register holder_reg,
- Register scratch1,
- Register scratch2,
- Handle<String> name,
- int save_at_depth,
- Label* miss,
- PrototypeCheckType check) {
- Handle<JSObject> first = object;
- // Make sure there's no overlap between holder and object registers.
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
- && !scratch2.is(scratch1));
-
- // Keep track of the current object in register reg. On the first
- // iteration, reg is an alias for object_reg; on later iterations,
- // it is an alias for holder_reg.
- Register reg = object_reg;
- int depth = 0;
-
- if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), object_reg);
- }
-
- // Check the maps in the prototype chain.
- // Traverse the prototype chain from the object and do map checks.
- Handle<JSObject> current = object;
- while (!current.is_identical_to(holder)) {
- ++depth;
-
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-
- Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
- if (!current->HasFastProperties() &&
- !current->IsJSGlobalObject() &&
- !current->IsJSGlobalProxy()) {
- if (!name->IsInternalizedString()) {
- name = factory()->InternalizeString(name);
- }
- ASSERT(current->property_dictionary()->FindEntry(*name) ==
- StringDictionary::kNotFound);
-
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
- scratch1, scratch2);
-
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- reg = holder_reg; // From now on the object will be in holder_reg.
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- bool in_new_space = heap()->InNewSpace(*prototype);
- Handle<Map> current_map(current->map());
- if (in_new_space) {
- // Save the map in scratch1 for later.
- __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
- if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
- }
-
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
- reg = holder_reg; // From now on the object will be in holder_reg.
-
- if (in_new_space) {
- // The prototype is in new space; we cannot store a reference to it
- // in the code. Load it from the map.
- __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else {
- // The prototype is in old space; load it directly.
- __ Move(reg, prototype);
- }
- }
-
- if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPointerSize), reg);
- }
-
- // Go to the next object in the prototype chain.
- current = prototype;
- }
- ASSERT(current.is_identical_to(holder));
-
- // Log the check depth.
- LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
- if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
- // Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()),
- miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
- }
-
- // Perform security check for access to the global object.
- ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
- if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
- // If we've skipped any global objects, it's not enough to verify that
- // their maps haven't changed. We also need to check that the property
- // cell for the property is still empty.
- GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
-
- // Return the register containing the holder.
- return reg;
-}
-
-
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
- Label* miss) {
- if (!miss->is_unused()) {
- __ jmp(success);
- __ bind(miss);
- GenerateLoadMiss(masm(), kind());
- }
-}
-
-
-Register BaseLoadStubCompiler::CallbackHandlerFrontend(
- Handle<JSObject> object,
- Register object_reg,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* success,
- Handle<ExecutableAccessorInfo> callback) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- ASSERT(!reg.is(scratch2()));
- ASSERT(!reg.is(scratch3()));
- ASSERT(!reg.is(scratch4()));
-
- // Load the properties dictionary.
- Register dictionary = scratch4();
- __ movq(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- &miss,
- &probe_done,
- dictionary,
- this->name(),
- scratch2(),
- scratch3());
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // index into the dictionary. Check that the value is the callback.
- Register index = scratch3();
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(scratch2(),
- Operand(dictionary, index, times_pointer_size,
- kValueOffset - kHeapObjectTag));
- __ movq(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(scratch2(), scratch3());
- __ j(not_equal, &miss);
- }
-
- HandlerFrontendFooter(success, &miss);
- return reg;
-}
-
-
-void BaseLoadStubCompiler::NonexistentHandlerFrontend(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Label* success,
- Handle<GlobalObject> global) {
- Label miss;
-
- Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (!global.is_null()) {
- GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
- }
-
- if (!last->HasFastProperties()) {
- __ movq(scratch2(), FieldOperand(reg, HeapObject::kMapOffset));
- __ movq(scratch2(), FieldOperand(scratch2(), Map::kPrototypeOffset));
- __ Cmp(scratch2(), isolate()->factory()->null_value());
- __ j(not_equal, &miss);
- }
-
- HandlerFrontendFooter(success, &miss);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadField(Register reg,
- Handle<JSObject> holder,
- PropertyIndex index) {
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
- __ ret(0);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadCallback(
- Register reg,
- Handle<ExecutableAccessorInfo> callback) {
- // Insert additional parameters into the stack frame above return address.
- ASSERT(!scratch2().is(reg));
- __ pop(scratch2()); // Get return address to place it below.
-
- __ push(receiver()); // receiver
- __ push(reg); // holder
- if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch1(), callback);
- __ push(FieldOperand(scratch1(),
- ExecutableAccessorInfo::kDataOffset)); // data
- } else {
- __ Push(Handle<Object>(callback->data(), isolate()));
- }
- __ PushAddress(ExternalReference::isolate_address()); // isolate
- __ push(name()); // name
- // Save a pointer to where we pushed the arguments pointer. This will be
- // passed as the const ExecutableAccessorInfo& to the C++ callback.
-
-#if defined(__MINGW64__)
- Register accessor_info_arg = rdx;
- Register name_arg = rcx;
-#elif defined(_WIN64)
- // Win64 uses the first register (rcx) for the returned value.
- Register accessor_info_arg = r8;
- Register name_arg = rdx;
-#else
- Register accessor_info_arg = rsi;
- Register name_arg = rdi;
-#endif
-
- ASSERT(!name_arg.is(scratch2()));
- __ movq(name_arg, rsp);
- __ push(scratch2()); // Restore return address.
-
- // 4-element array for v8::Arguments::values_ and a handle for the name.
- const int kStackSpace = 5;
-
- // Allocate v8::AccessorInfo in non-GCed stack space.
- const int kArgStackSpace = 1;
-
- __ PrepareCallApiFunction(kArgStackSpace);
- __ lea(rax, Operand(name_arg, 4 * kPointerSize));
-
- // v8::AccessorInfo::args_.
- __ movq(StackSpaceOperand(0), rax);
-
- // The context register (rsi) has been saved in PrepareCallApiFunction and
- // could be used to pass arguments.
- __ lea(accessor_info_arg, StackSpaceOperand(0));
-
- Address getter_address = v8::ToCData<Address>(callback->getter());
- __ CallApiFunctionAndReturn(getter_address, kStackSpace);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
- // Return the constant value.
- __ LoadHeapObject(rax, value);
- __ ret(0);
-}
-
-
-void BaseLoadStubCompiler::GenerateLoadInterceptor(
- Register holder_reg,
- Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Handle<String> name) {
- ASSERT(interceptor_holder->HasNamedInterceptor());
- ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them; other cases may be added
- // later.
- bool compile_followup_inline = false;
- if (lookup->IsFound() && lookup->IsCacheable()) {
- if (lookup->IsField()) {
- compile_followup_inline = true;
- } else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
- ExecutableAccessorInfo* callback =
- ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
- compile_followup_inline = callback->getter() != NULL &&
- callback->IsCompatibleReceiver(*object);
- }
- }
-
- if (compile_followup_inline) {
- // Compile the interceptor call, followed by inline code to load the
- // property from further up the prototype chain if the call fails.
- // Check that the maps haven't changed.
- ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
- // Preserve the receiver register explicitly whenever it is different from
- // the holder and it is needed should the interceptor return without any
- // result. The CALLBACKS case needs the receiver to be passed into C++ code;
- // the FIELD case might cause a miss during the prototype check.
- bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
- (lookup->type() == CALLBACKS || must_perform_prototype_check);
-
- // Save necessary data before invoking an interceptor.
- // Requires a frame to make GC aware of pushed pointers.
- {
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
- if (must_preserve_receiver_reg) {
- __ push(receiver());
- }
- __ push(holder_reg);
- __ push(this->name());
-
- // Invoke an interceptor. Note: the map checks from the receiver to the
- // interceptor's holder have been compiled before (see a caller
- // of this method).
- CompileCallLoadPropertyWithInterceptor(masm(),
- receiver(),
- holder_reg,
- this->name(),
- interceptor_holder);
-
- // Check if the interceptor provided a value for the property. If it
- // did, return immediately.
- Label interceptor_failed;
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(equal, &interceptor_failed);
- frame_scope.GenerateLeaveFrame();
- __ ret(0);
-
- __ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
- if (must_preserve_receiver_reg) {
- __ pop(receiver());
- }
-
- // Leave the internal frame.
- }
-
- GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
- } else { // !compile_followup_inline
- // Call the runtime system to load the interceptor.
- // Check that the maps haven't changed.
- __ pop(scratch2()); // save old return address
- PushInterceptorArguments(masm(), receiver(), holder_reg,
- this->name(), interceptor_holder);
- __ push(scratch2()); // restore old return address
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
-}
-
-
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
- if (kind_ == Code::KEYED_CALL_IC) {
- __ Cmp(rcx, name);
- __ j(not_equal, miss);
- }
-}
-
-
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name,
- Label* miss) {
- ASSERT(holder->IsGlobalObject());
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(rdx, miss);
- CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, miss);
-}
-
-
-void CallStubCompiler::GenerateLoadFunctionFromCell(
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Label* miss) {
- // Get the value from the cell.
- __ Move(rdi, cell);
- __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
-
- // Check that the cell contains the same function.
- if (heap()->InNewSpace(*function)) {
- // We can't embed a pointer to a function in new space so we have
- // to verify that the shared function info is unchanged. This has
- // the nice side effect that multiple closures based on the same
- // function can all use this call IC. Before we load through the
- // function, we have to verify that it still is a function.
- __ JumpIfSmi(rdi, miss);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
- __ j(not_equal, miss);
-
- // Check the shared function info. Make sure it hasn't changed.
- __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
- __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
- } else {
- __ Cmp(rdi, function);
- }
- __ j(not_equal, miss);
-}
-
-
-void CallStubCompiler::GenerateMissBranch() {
- Handle<Code> code =
- isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
- kind_,
- extra_state_);
- __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
- Handle<JSObject> holder,
- PropertyIndex index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
-
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- // Do the right check and compute the holder register.
- Register reg = CheckPrototypes(object, rdx, holder, rbx, rax, rdi,
- name, &miss);
-
- GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
-
- // Check that the function really is a function.
- __ JumpIfSmi(rdi, &miss);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
-
- // Invoke the function.
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
-
- if (argc == 0) {
- // Noop, return the length.
- __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- __ ret((argc + 1) * kPointerSize);
- } else {
- Label call_builtin;
-
- if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
-
- // Get the elements array of the object.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- factory()->fixed_array_map());
- __ j(not_equal, &check_double);
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the elements' length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
- __ j(greater, &attempt_to_grow_elements);
-
- // Check if value is a smi.
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
- __ JumpIfNotSmi(rcx, &with_write_barrier);
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Store the value.
- __ movq(FieldOperand(rdi,
- rax,
- times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize),
- rcx);
-
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&check_double);
-
- // Check that the elements are in double mode.
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- factory()->fixed_double_array_map());
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the elements' length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
- __ j(greater, &call_builtin);
-
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
- __ StoreNumberToDoubleElements(
- rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&with_write_barrier);
-
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
- Label fast_object, not_fast_object;
- __ CheckFastObjectElements(rbx, &not_fast_object, Label::kNear);
- __ jmp(&fast_object);
- // In case of fast smi-only, convert to fast object; otherwise bail out.
- __ bind(&not_fast_object);
- __ CheckFastSmiElements(rbx, &call_builtin);
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &call_builtin);
- // rdx: receiver
- // rbx: map
-
- Label try_holey_map;
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- &try_holey_map);
-
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- // Restore rdi.
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
- __ jmp(&fast_object);
-
- __ bind(&try_holey_map);
- __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
- FAST_HOLEY_ELEMENTS,
- rbx,
- rdi,
- &call_builtin);
- ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm(),
- DONT_TRACK_ALLOCATION_SITE,
- NULL);
- __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
- __ bind(&fast_object);
- } else {
- __ CheckFastObjectElements(rbx, &call_builtin);
- }
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- // Store the value.
- __ lea(rdx, FieldOperand(rdi,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ movq(Operand(rdx, 0), rcx);
-
- __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&attempt_to_grow_elements);
- if (!FLAG_inline_new) {
- __ jmp(&call_builtin);
- }
-
- __ movq(rbx, Operand(rsp, argc * kPointerSize));
- // Growing elements that are SMI-only requires special handling in case
- // the new element is non-Smi. For now, delegate to the builtin.
- Label no_fast_elements_check;
- __ JumpIfSmi(rbx, &no_fast_elements_check);
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
- __ bind(&no_fast_elements_check);
-
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
-
- const int kAllocationDelta = 4;
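- // If the elements array ends exactly at the current new-space allocation
- // top, it can be grown in place by bumping the top pointer instead of
- // reallocating and copying.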
- // Load top.
- __ Load(rcx, new_space_allocation_top);
-
- // Check if it's the end of elements.
- __ lea(rdx, FieldOperand(rdi,
- rax, times_pointer_size,
- FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmpq(rdx, rcx);
- __ j(not_equal, &call_builtin);
- __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
- Operand limit_operand =
- masm()->ExternalOperand(new_space_allocation_limit);
- __ cmpq(rcx, limit_operand);
- __ j(above, &call_builtin);
-
- // We fit and could grow elements.
- __ Store(new_space_allocation_top, rcx);
-
- // Push the argument...
- __ movq(Operand(rdx, 0), rbx);
- // ... and fill the rest with holes.
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < kAllocationDelta; i++) {
- __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
- }
-
- // We know the elements array is in new space so we don't need the
- // remembered set, but we just pushed a value onto it so we may have to
- // tell the incremental marker to rescan the object that we just grew. We
- // don't need to worry about the holes because they are in old space and
- // already marked black.
- __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
- // Restore receiver to rdx as finish sequence assumes it's here.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Increment element's and array's sizes.
- __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
- Smi::FromInt(kAllocationDelta));
-
- // Make new length a smi before returning it.
- __ Integer32ToSmi(rax, rax);
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- isolate()),
- argc + 1,
- 1);
- }
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rcx : name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
-
- Label miss, return_undefined, call_builtin;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
-
- // Get the elements array of the object.
- __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
-
- // Check that the elements are in fast mode and writable.
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into rcx and calculate new length.
- __ SmiToInteger32(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ subl(rcx, Immediate(1));
- __ j(negative, &return_undefined);
-
- // Get the last element.
- __ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
- __ movq(rax, FieldOperand(rbx,
- rcx, times_pointer_size,
- FixedArray::kHeaderSize));
- // Check if element is already the hole.
- __ cmpq(rax, r9);
- // If so, call slow-case to also check prototypes for value.
- __ j(equal, &call_builtin);
-
- // Set the array's length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
-
- // Fill with the hole and return original value.
- __ movq(FieldOperand(rbx,
- rcx, times_pointer_size,
- FixedArray::kHeaderSize),
- r9);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&return_undefined);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
-
- __ bind(&call_builtin);
- __ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, isolate()),
- argc + 1,
- 1);
-
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
-
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
-
- Register receiver = rbx;
- Register index = rdi;
- Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharCodeAtGenerator generator(receiver,
- index,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
-
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If object is not a string, bail out to regular call.
- if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
- const int argc = arguments().immediate();
- Label miss;
- Label name_miss;
- Label index_out_of_range;
- Label* index_out_of_range_label = &index_out_of_range;
- if (kind_ == Code::CALL_IC &&
- (CallICBase::StringStubState::decode(extra_state_) ==
- DEFAULT_STRING_STUB)) {
- index_out_of_range_label = &miss;
- }
- GenerateNameCheck(name, &name_miss);
-
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rax,
- &miss);
- ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
-
- Register receiver = rax;
- Register index = rdi;
- Register scratch = rdx;
- Register result = rax;
- __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
- if (argc > 0) {
- __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
- } else {
- __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
- }
-
- StringCharAtGenerator generator(receiver,
- index,
- scratch,
- result,
- &miss, // When not a string.
- &miss, // When not a number.
- index_out_of_range_label,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm());
- __ ret((argc + 1) * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- if (index_out_of_range.is_linked()) {
- __ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kempty_stringRootIndex);
- __ ret((argc + 1) * kPointerSize);
- }
- __ bind(&miss);
- // Restore function name in rcx.
- __ Move(rcx, name);
- __ bind(&name_miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- const int argc = arguments().immediate();
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
-
- // Load the char code argument.
- Register code = rbx;
- __ movq(code, Operand(rsp, 1 * kPointerSize));
-
- // Check the code is a smi.
- Label slow;
- __ JumpIfNotSmi(code, &slow);
-
- // Convert the smi code to uint16.
- __ SmiAndConstant(code, code, Smi::FromInt(0xffff));
-
- StringCharFromCodeGenerator generator(code, rax);
- generator.GenerateFast(masm());
- __ ret(2 * kPointerSize);
-
- StubRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm(), call_helper);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- __ bind(&miss);
- // rcx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // TODO(872): implement this.
- return Handle<Code>::null();
-}
-
-
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rcx : function name
- // -- rsp[0] : return address
- // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
- // -- ...
- // -- rsp[(argc + 1) * 8] : receiver
- // -----------------------------------
-
- // If the object is not a JSObject or we got an unexpected number of
- // arguments, bail out to the regular call.
- const int argc = arguments().immediate();
- if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- if (cell.is_null()) {
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, &miss);
- } else {
- ASSERT(cell->value() == *function);
- GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
- &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
- }
- // Load the (only) argument into rax.
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
-
- // Check if the argument is a smi.
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(rax, &not_smi);
- __ SmiToInteger32(rax, rax);
-
- // Set rbx to 1...1 (== -1) if the argument is negative, or to 0...0
- // otherwise.
- __ movl(rbx, rax);
- __ sarl(rbx, Immediate(kBitsPerInt - 1));
-
- // Do bitwise not or do nothing depending on rbx.
- __ xorl(rax, rbx);
-
- // Add 1 or do nothing depending on rbx.
- __ subl(rax, rbx);
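- // Together, the xor and sub compute (x ^ mask) - mask, a branch-free
- // absolute value: mask is 0 for non-negative inputs and -1 for negative
- // ones.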
-
- // If the result is still negative, go to the slow case.
- // This only happens for the most negative smi.
- Label slow;
- __ j(negative, &slow);
-
- // Smi case done.
- __ Integer32ToSmi(rax, rax);
- __ ret(2 * kPointerSize);
-
- // Check if the argument is a heap number and load its value.
- __ bind(&not_smi);
- __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
-
- // Check the sign of the argument. If the argument is positive,
- // just return it.
- Label negative_sign;
- const int sign_mask_shift =
- (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
- __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
- RelocInfo::NONE64);
- __ testq(rbx, rdi);
- __ j(not_zero, &negative_sign);
- __ ret(2 * kPointerSize);
-
- // If the argument is negative, clear the sign, and return a new
- // number. We still have the sign mask in rdi.
- __ bind(&negative_sign);
- __ xor_(rbx, rdi);
- __ AllocateHeapNumber(rax, rdx, &slow);
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
- __ ret(2 * kPointerSize);
-
- // Tail call the full function. We do not have to patch the receiver
- // because the function makes no use of it.
- __ bind(&slow);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- __ bind(&miss);
- // rcx: function name.
- GenerateMissBranch();
-
- // Return the generated code.
- return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileFastApiCall(
- const CallOptimization& optimization,
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- ASSERT(optimization.is_simple_api_call());
- // Bail out if object is a global object as we don't want to
- // repatch it to the global receiver.
- if (object->IsGlobalObject()) return Handle<Code>::null();
- if (!cell.is_null()) return Handle<Code>::null();
- if (!object->IsJSObject()) return Handle<Code>::null();
- int depth = optimization.GetPrototypeDepthOfExpectedType(
- Handle<JSObject>::cast(object), holder);
- if (depth == kInvalidProtoDepth) return Handle<Code>::null();
-
- Label miss, miss_before_stack_reserved;
- GenerateNameCheck(name, &miss_before_stack_reserved);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(rdx, &miss_before_stack_reserved);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_const(), 1);
- __ IncrementCounter(counters->call_const_fast_api(), 1);
-
- // Allocate space for v8::Arguments implicit values. Must be initialized
- // before calling any runtime function.
- __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
-
- // Check that the maps haven't changed and find a Holder as a side effect.
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
- name, depth, &miss);
-
- // Move the return address on top of the stack.
- __ movq(rax, Operand(rsp, 4 * kPointerSize));
- __ movq(Operand(rsp, 0 * kPointerSize), rax);
-
- GenerateFastApiCall(masm(), optimization, argc);
-
- __ bind(&miss);
- __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
-
- __ bind(&miss_before_stack_reserved);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Label* success) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the receiver from the stack.
- const int argc = arguments().immediate();
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the receiver isn't a smi.
- if (check != NUMBER_CHECK) {
- __ JumpIfSmi(rdx, &miss);
- }
-
- // Make sure that it's okay not to patch the on stack receiver
- // unless we're doing a receiver map check.
- ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
-
- Counters* counters = isolate()->counters();
- switch (check) {
- case RECEIVER_MAP_CHECK:
- __ IncrementCounter(counters->call_const(), 1);
-
- // Check that the maps haven't changed.
- CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax,
- rdi, name, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
- break;
-
- case STRING_CHECK:
- // Check that the object is a string.
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
- break;
-
- case SYMBOL_CHECK:
- // Check that the object is a symbol.
- __ CmpObjectType(rdx, SYMBOL_TYPE, rax);
- __ j(not_equal, &miss);
- break;
-
- case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(rdx, &fast);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
- break;
- }
- case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &fast);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
- rax, holder, rbx, rdx, rdi, name, &miss);
- break;
- }
- }
-
- __ jmp(success);
-
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-}
-
-
-void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallConstant(
- Handle<Object> object,
- Handle<JSObject> holder,
- Handle<String> name,
- CheckType check,
- Handle<JSFunction> function) {
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label success;
-
- CompileHandlerFrontend(object, holder, name, check, &success);
- __ bind(&success);
- CompileHandlerBackend(function);
-
- // Return the generated code.
- return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // Get the receiver from the stack.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
- compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
- &miss);
-
- // Restore receiver.
- __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
- // Check that the function really is a function.
- __ JumpIfSmi(rax, &miss);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &miss);
-
- // Patch the receiver on the stack with the global proxy if
- // necessary.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
-
- // Invoke the function.
- __ movq(rdi, rax);
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle load cache miss.
- __ bind(&miss);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> CallStubCompiler::CompileCallGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
- Handle<JSFunction> function,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // rcx : function name
- // rsp[0] : return address
- // rsp[8] : argument argc
- // rsp[16] : argument argc - 1
- // ...
- // rsp[argc * 8] : argument 1
- // rsp[(argc + 1) * 8] : argument 0 = receiver
- // -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
- Label miss;
- GenerateNameCheck(name, &miss);
-
- // Get the number of arguments.
- const int argc = arguments().immediate();
- GenerateGlobalReceiverCheck(object, holder, name, &miss);
- GenerateLoadFunctionFromCell(cell, function, &miss);
-
- // Patch the receiver on the stack with the global proxy.
- if (object->IsGlobalObject()) {
- __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
- __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
- }
-
- // Set up the context (function already in rdi).
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Jump to the cached code (tail call).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->call_global_inline(), 1);
- ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
- ? CALL_AS_FUNCTION
- : CALL_AS_METHOD;
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
- NullCallWrapper(), call_kind);
-
- // Handle call cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->call_global_inline_miss(), 1);
- GenerateMissBranch();
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- rdx, rcx, rbx, rdi,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<ExecutableAccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- // Check that the maps haven't changed.
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(receiver, rdx, holder, rbx, r8, rdi, name, &miss);
-
- // Stub never generated for non-global objects that require access checks.
- ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
- __ pop(rbx); // remove the return address
- __ push(rdx); // receiver
- __ Push(callback); // callback info
- __ push(rcx); // name
- __ push(rax); // value
- __ push(rbx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
- __ TailCallExternalReference(store_callback_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Save value register, so we can restore it later.
- __ push(rax);
-
- if (!setter.is_null()) {
- // Call the JavaScript setter with receiver and value on the stack.
- __ push(rdx);
- __ push(rax);
- ParameterCount actual(1);
- __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // We have to return the passed value, not the return value of the setter.
- __ pop(rax);
-
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> setter) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(rdx, &miss);
- CheckPrototypes(receiver, rdx, holder, rbx, r8, rdi, name, &miss);
-
- GenerateStoreViaSetter(masm(), setter);
-
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
- Handle<JSObject> receiver,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss,
- DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
- // Perform global security token check if needed.
- if (receiver->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(rdx, rbx, &miss);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-
- __ pop(rbx); // remove the return address
- __ push(rdx); // receiver
- __ push(rcx); // name
- __ push(rax); // value
- __ Push(Smi::FromInt(strict_mode_));
- __ push(rbx); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
- __ TailCallExternalReference(store_ic_property, 4, 1);
-
- // Handle store cache miss.
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
- Handle<GlobalObject> object,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the map of the global has not changed.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, &miss);
-
- // Compute the cell operand to use.
- __ Move(rbx, cell);
- Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
-
- // Check that the value in the cell is not the hole. If it is, this
- // cell could have been deleted and reintroducing the global needs
- // to update the property details in the property dictionary of the
- // global object. We bail out to the runtime system to do that.
- __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
-
- // Store the value in the cell.
- __ movq(cell_operand, rax);
- // Cells are always rescanned, so no write barrier here.
-
- // Return the value (register rax).
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_store_global_inline(), 1);
- __ ret(0);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
- int index,
- Handle<Map> transition,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_store_field(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rcx, name);
- __ j(not_equal, &miss);
-
- // Generate store field code. Preserves receiver and name on jump to miss.
- GenerateStoreField(masm(),
- object,
- index,
- transition,
- name,
- rdx, rcx, rbx, rdi,
- &miss);
-
- // Handle store cache miss.
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_store_field(), 1);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(transition.is_null()
- ? Code::FIELD
- : Code::MAP_TRANSITION, name);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- ElementsKind elements_kind = receiver_map->elements_kind();
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- Handle<Code> stub =
- KeyedStoreElementStub(is_js_array,
- elements_kind,
- grow_mode_).GetCode(isolate());
-
- __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
- MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- __ JumpIfSmi(rdx, &miss, Label::kNear);
-
- __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int i = 0; i < receiver_count; ++i) {
- // Check map and tail call if there's a match
- __ Cmp(rdi, receiver_maps->at(i));
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- __ movq(rbx, transitioned_maps->at(i), RelocInfo::EMBEDDED_OBJECT);
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
- Handle<JSObject> object,
- Handle<JSObject> last,
- Handle<String> name,
- Handle<GlobalObject> global) {
- Label success;
-
- NonexistentHandlerFrontend(object, last, name, &success, global);
-
- __ bind(&success);
- // Return undefined if maps of the full prototype chain are still the
- // same and no global property with this name contains a value.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ ret(0);
-
- // Return the generated code.
- return GetCode(Code::HANDLER_FRAGMENT, Code::NONEXISTENT, name);
-}
-
-
-Register* LoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { rax, rcx, rdx, rbx, rdi, r8 };
- return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
- static Register registers[] = { rdx, rax, rbx, rcx, rdi, r8 };
- return registers;
-}
-
-
-void KeyedLoadStubCompiler::GenerateNameCheck(Handle<String> name,
- Register name_reg,
- Label* miss) {
- __ Cmp(name_reg, name);
- __ j(not_equal, miss);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- if (!getter.is_null()) {
- // Call the JavaScript getter with the receiver on the stack.
- __ push(rax);
- ParameterCount actual(0);
- __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(),
- CALL_AS_METHOD);
- } else {
- // If we generate a global code snippet for deoptimization only, remember
- // the place to continue after deoptimization.
- masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
- }
-
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- }
- __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
- Handle<JSObject> object,
- Handle<GlobalObject> global,
- Handle<JSGlobalPropertyCell> cell,
- Handle<String> name,
- bool is_dont_delete) {
- Label success, miss;
- // TODO(verwaest): Directly store to rax. Currently we cannot do this, since
- // rax is used as receiver(), which we would otherwise clobber before a
- // potential miss.
-
- __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
- HandlerFrontendHeader(
- object, receiver(), Handle<JSObject>::cast(global), name, &miss);
-
- // Get the value from the cell.
- __ Move(rbx, cell);
- __ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
-
- // Check for deleted property if property can actually be deleted.
- if (!is_dont_delete) {
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss);
- } else if (FLAG_debug_code) {
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ Check(not_equal, "DontDelete cells can't contain the hole");
- }
-
- HandlerFrontendFooter(&success, &miss);
- __ bind(&success);
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1);
- __ movq(rax, rbx);
- __ ret(0);
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
- Handle<Map> receiver_map) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- ElementsKind elements_kind = receiver_map->elements_kind();
- if (receiver_map->has_fast_elements() ||
- receiver_map->has_external_array_elements()) {
- Handle<Code> stub = KeyedLoadFastElementStub(
- receiver_map->instance_type() == JS_ARRAY_TYPE,
- elements_kind).GetCode(isolate());
- __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
- } else {
- Handle<Code> stub =
- KeyedLoadDictionaryElementStub().GetCode(isolate());
- __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
- }
-
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::IC_FRAGMENT, Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
- MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- Handle<String> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY) {
- GenerateNameCheck(name, this->name(), &miss);
- }
-
- __ JumpIfSmi(receiver(), &miss);
- Register map_reg = scratch1();
- __ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int current = 0; current < receiver_count; ++current) {
- // Check map and tail call if there's a match
- __ Cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
- }
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), kind());
-
- // Return the generated code.
- InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(Code::IC_FRAGMENT, type, name, state);
-}
-
-
- // Specialized stub for constructing objects from functions which have only
-// simple assignments of the form this.x = ...; in their body.
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
- Handle<JSFunction> function) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rdi : constructor
- // -- rsp[0] : return address
- // -- rsp[8] : last argument
- // -----------------------------------
- Label generic_stub_call;
-
- // Use r8 for holding undefined which is used in several places below.
- __ Move(r8, factory()->undefined_value());
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Check to see whether there are any break points in the function code. If
- // there are, jump to the generic constructor stub, which calls the actual
- // code for the function, thereby hitting the break points.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmpq(rbx, r8);
- __ j(not_equal, &generic_stub_call);
-#endif
-
- // Load the initial map and verify that it is in fact a map.
- // rdi: constructor
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
- // Will indicate both a NULL and a Smi.
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rbx, &generic_stub_call);
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
- __ j(not_equal, &generic_stub_call);
-
-#ifdef DEBUG
- // Cannot construct functions this way.
- // rbx: initial map
- __ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
- __ Check(not_equal, "Function constructed by construct stub.");
-#endif
-
- // Now allocate the JSObject in new space.
- // rbx: initial map
- ASSERT(function->has_initial_map());
- int instance_size = function->initial_map()->instance_size();
-#ifdef DEBUG
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ shl(rcx, Immediate(kPointerSizeLog2));
- __ cmpq(rcx, Immediate(instance_size));
- __ Check(equal, "Instance size of initial map changed.");
-#endif
- __ AllocateInNewSpace(instance_size, rdx, rcx, no_reg,
- &generic_stub_call, NO_ALLOCATION_FLAGS);
-
- // Allocated the JSObject; now initialize the fields and add the heap tag.
- // rbx: initial map
- // rdx: JSObject (untagged)
- __ movq(Operand(rdx, JSObject::kMapOffset), rbx);
- __ Move(rbx, factory()->empty_fixed_array());
- __ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx);
- __ movq(Operand(rdx, JSObject::kElementsOffset), rbx);
-
- // rax: argc
- // rdx: JSObject (untagged)
- // Load the address of the first in-object property into r9.
- __ lea(r9, Operand(rdx, JSObject::kHeaderSize));
- // Calculate the location of the first argument. The stack contains only the
- // return address on top of the argc arguments.
- __ lea(rcx, Operand(rsp, rax, times_pointer_size, 0));
-
- // rax: argc
- // rcx: first argument
- // rdx: JSObject (untagged)
- // r8: undefined
- // r9: first in-object property of the JSObject
- // Fill the initialized properties with a constant value or a passed argument
- // depending on the this.x = ...; assignment in the function.
- Handle<SharedFunctionInfo> shared(function->shared());
- for (int i = 0; i < shared->this_property_assignments_count(); i++) {
- if (shared->IsThisPropertyAssignmentArgument(i)) {
- // Check if the argument assigned to the property is actually passed.
- // If argument is not passed the property is set to undefined,
- // otherwise find it on the stack.
- int arg_number = shared->GetThisPropertyAssignmentArgument(i);
- __ movq(rbx, r8);
- __ cmpq(rax, Immediate(arg_number));
- __ cmovq(above, rbx, Operand(rcx, arg_number * -kPointerSize));
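- // cmovq(above) loads the actual argument only when argc > arg_number;
- // otherwise rbx keeps the undefined value copied from r8 above.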
- // Store value in the property.
- __ movq(Operand(r9, i * kPointerSize), rbx);
- } else {
- // Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
- isolate());
- __ Move(Operand(r9, i * kPointerSize), constant);
- }
- }
-
- // Fill the unused in-object property fields with undefined.
- for (int i = shared->this_property_assignments_count();
- i < function->initial_map()->inobject_properties();
- i++) {
- __ movq(Operand(r9, i * kPointerSize), r8);
- }
-
- // rax: argc
- // rdx: JSObject (untagged)
- // Move argc to rbx and the JSObject to return to rax and tag it.
- __ movq(rbx, rax);
- __ movq(rax, rdx);
- __ or_(rax, Immediate(kHeapObjectTag));
-
- // rax: JSObject
- // rbx: argc
- // Remove caller arguments and receiver from the stack and return.
- __ pop(rcx);
- __ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize));
- __ push(rcx);
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- __ IncrementCounter(counters->constructed_objects_stub(), 1);
- __ ret(0);
-
- // Jump to the generic stub in case the specialized code cannot handle the
- // construction.
- __ bind(&generic_stub_call);
- Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(code, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- __ JumpIfNotSmi(rax, &miss_force_generic);
- __ SmiToInteger32(rbx, rax);
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // rdx: receiver
- // rax: key
- // rbx: key as untagged int32
- // rcx: elements
- __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
- __ ret(0);
-
- __ bind(&slow);
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-static void GenerateSmiKeyCheck(MacroAssembler* masm,
- Register key,
- Register scratch,
- XMMRegister xmm_scratch0,
- XMMRegister xmm_scratch1,
- Label* fail) {
- // Check that key is a smi or a heap number containing a smi and branch
- // if the check fails.
- Label key_ok;
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- masm->isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
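- // A heap number is accepted only if truncating it to int32 and converting
- // back reproduces the original value exactly; ucomisd raises the parity
- // flag for NaN, which is rejected below as well.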
- __ movsd(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
- __ cvttsd2si(scratch, xmm_scratch0);
- __ cvtlsi2sd(xmm_scratch1, scratch);
- __ ucomisd(xmm_scratch1, xmm_scratch0);
- __ j(not_equal, fail);
- __ j(parity_even, fail); // NaN.
- __ Integer32ToSmi(key, scratch);
- __ bind(&key_ok);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rdi, rcx); // Untag the index.
- __ cmpq(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- Label check_heap_number;
- if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
- // Float to pixel conversion is only implemented in the runtime for now.
- __ JumpIfNotSmi(rax, &slow);
- } else {
- __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
- }
- // No more branches to slow case on this path. Key and receiver not needed.
- __ SmiToInteger32(rdx, rax);
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- { // Clamp the value to [0..255].
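- // Branch-free clamp: setcc(negative) below yields 1 for negative values
- // (decb then gives 0) and 0 for values above 255 (decb wraps the byte
- // to 255).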
- Label done;
- __ testl(rdx, Immediate(0xFFFFFF00));
- __ j(zero, &done, Label::kNear);
- __ setcc(negative, rdx); // 1 if negative, 0 if positive.
- __ decb(rdx); // 0 if negative, 255 if positive.
- __ bind(&done);
- }
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- // Need to perform int-to-float conversion.
- __ cvtlsi2ss(xmm0, rdx);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- // Need to perform int-to-double conversion.
- __ cvtlsi2sd(xmm0, rdx);
- __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- __ ret(0);
-
- // TODO(danno): handle heap number -> pixel array conversion
- if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
- __ bind(&check_heap_number);
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
- __ j(not_equal, &slow);
- // No more branches to slow case on this path.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rdi: untagged index
- // rbx: base pointer of external storage
- // xmm0: value
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvtsd2ss(xmm0, xmm0);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- __ ret(0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
- __ ret(0);
- } else {
- // Perform float-to-int conversion with truncation (round-to-zero)
- // behavior.
- // Fast path: use machine instruction to convert to int64. If that
- // fails (out-of-range), go into the runtime.
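- // cvttsd2siq produces the sentinel 0x8000000000000000 for NaN and values
- // outside the int64 range, so that exact bit pattern signals a failed
- // conversion and we defer to the runtime.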
- __ cvttsd2siq(r8, xmm0);
- __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- __ cmpq(r8, kScratchRegister);
- __ j(equal, &slow);
-
- // rdx: value (converted to an untagged integer)
- // rdi: untagged index
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(Operand(rbx, rdi, times_1, 0), r8);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(Operand(rbx, rdi, times_2, 0), r8);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(Operand(rbx, rdi, times_4, 0), r8);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- __ ret(0);
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Miss case: call runtime.
- __ bind(&miss_force_generic);
-
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
- MacroAssembler* masm,
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, finish_store, grow;
- Label check_capacity, slow;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- if (IsFastSmiElementsKind(elements_kind)) {
- __ JumpIfNotSmi(rax, &transition_elements_kind);
- }
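For context, a hedged sketch of the smi layout these checks depend on: on x64 V8 of this vintage a smi keeps its 32-bit payload in the upper half of the tagged word with a zero tag bit at the bottom, so JumpIfNotSmi is a test of the low bit and SmiToInteger32 is an arithmetic shift. GenerateSmiKeyCheck additionally accepts a heap number whose value is exactly representable as a smi. The constant and helper names below are illustrative.

#include <cstdint>

const intptr_t kSmiTagMask = 1;  // low bit: 0 => smi, 1 => heap object pointer

inline bool IsSmi(intptr_t tagged) {
  return (tagged & kSmiTagMask) == 0;
}

inline intptr_t TagSmi(int32_t value) {
  return static_cast<intptr_t>(value) << 32;  // payload in the upper 32 bits
}

inline int32_t UntagSmi(intptr_t tagged) {
  return static_cast<int32_t>(tagged >> 32);  // what SmiToInteger32 does
}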
-
- // Get the elements array and make sure it is a fast element array, not
- // a copy-on-write ('cow') array.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- // Check that the key is within bounds.
- if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
- }
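A small sketch of the bounds rule the two SmiCompare/j(above_equal) pairs implement: for a JSArray the key is checked against the array's length, for other receivers against the backing store's capacity, and the comparison is unsigned, so negative keys are rejected as out of bounds. On the growth path a key equal to the current length falls through to &grow instead. Illustrative code, not V8 API.

#include <cstdint>

inline bool KeyInBounds(int32_t key, uint32_t limit) {
  // Unsigned compare: a negative key wraps to a huge value and is rejected.
  return static_cast<uint32_t>(key) < limit;
}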
-
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &miss_force_generic);
-
- __ bind(&finish_store);
- if (IsFastSmiElementsKind(elements_kind)) {
- __ SmiToInteger32(rcx, rcx);
- __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- } else {
- // Do the store and update the write barrier.
- ASSERT(IsFastObjectElementsKind(elements_kind));
- __ SmiToInteger32(rcx, rcx);
- __ lea(rcx,
- FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ movq(Operand(rcx, 0), rax);
- // Make sure to preserve the value in register rax.
- __ movq(rbx, rax);
- __ RecordWrite(rdi, rcx, rbx, kDontSaveFPRegs);
- }
-
- // Done.
- __ ret(0);
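Why the two store paths above differ: a smi is not a heap pointer, so it can be written directly, while storing an arbitrary object must tell the GC about the new pointer via RecordWrite so remembered sets and incremental marking stay correct. Below is a hedged scalar sketch with placeholder types and a placeholder barrier, not the V8 API.

struct Object;  // opaque stand-in for a tagged V8 value

// Stand-in for the RecordWrite stub: records that *slot now refers to value.
inline void RecordWriteBarrier(Object** slot, Object* value) {
  (void)slot;
  (void)value;  // the real stub updates GC metadata here
}

inline void StoreElement(Object** slot, Object* value, bool value_is_smi) {
  *slot = value;                        // the movq in the stub
  if (!value_is_smi) {
    RecordWriteBarrier(slot, value);    // skipped on the smi fast path
  }
}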
-
- // Handle store cache miss.
- __ bind(&miss_force_generic);
- Handle<Code> ic_force_generic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element; anything else
- // must be handled by the runtime. Flags are already set by the previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, &check_capacity);
-
- int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Initialize the new FixedArray backing store: set its map and length,
- // then fill the preallocated slots with the hole value.
- __ Move(FieldOperand(rdi, JSObject::kMapOffset),
- masm->isolate()->factory()->fixed_array_map());
- __ Move(FieldOperand(rdi, FixedArray::kLengthOffset),
- Smi::FromInt(JSArray::kPreallocatedArrayElements));
- __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
- __ movq(FieldOperand(rdi, FixedArray::SizeFor(i)), rbx);
- }
-
- // Store the element at index zero.
- __ movq(FieldOperand(rdi, FixedArray::SizeFor(0)), rax);
-
- // Install the new backing store in the JSArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
- __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
- __ ret(0);
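The preallocation above sizes the new backing store with FixedArray::SizeFor(JSArray::kPreallocatedArrayElements). Here is a hedged sketch of that size computation, using layout constants typical for 64-bit V8 of this era; treat the numbers and the preallocation count as assumptions.

#include <cstddef>

const size_t kPointerSize = 8;
const size_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map word + length word

inline size_t FixedArraySizeFor(size_t length) {
  return kFixedArrayHeaderSize + length * kPointerSize;
}

// The stub allocates FixedArraySizeFor(kPreallocatedArrayElements) bytes in
// new space, stores the incoming value at index 0, and fills the remaining
// preallocated slots with the hole so they read back as absent.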
-
- __ bind(&check_capacity);
- // Check for copy-on-write (COW) elements; in general they are not handled
- // by this stub.
- __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &miss_force_generic);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmpq(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
- Smi::FromInt(1));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
- MacroAssembler* masm,
- bool is_js_array,
- KeyedAccessGrowMode grow_mode) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, transition_elements_kind, finish_store;
- Label grow, slow, check_capacity, restore_key_transition_elements_kind;
-
- // This stub is meant to be tail-jumped to; the receiver must already
- // have been verified by the caller not to be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rdi);
-
- // Check that the key is within bounds.
- if (is_js_array) {
- __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- if (grow_mode == ALLOW_JSARRAY_GROWTH) {
- __ j(above_equal, &grow);
- } else {
- __ j(above_equal, &miss_force_generic);
- }
- } else {
- __ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
- }
-
- // Store the value, converting smis and heap numbers to doubles.
- __ bind(&finish_store);
- __ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &restore_key_transition_elements_kind);
- __ ret(0);
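A rough scalar model of what StoreNumberToDoubleElements does for this stub: a smi is untagged and widened to a double, a heap number's IEEE payload is stored directly, and anything else jumps to the supplied bailout label; in the real macro NaNs are also canonicalized so user values cannot alias the hole pattern. Helper names and the smi layout below are assumptions.

#include <cstdint>

inline void StoreNumberAsDouble(bool value_is_smi, intptr_t tagged_smi,
                                double heap_number_value,
                                double* elements, int untagged_index) {
  if (value_is_smi) {
    // Untag (payload in the upper 32 bits on x64) and widen to double.
    elements[untagged_index] =
        static_cast<double>(static_cast<int32_t>(tagged_smi >> 32));
  } else {
    elements[untagged_index] = heap_number_value;
  }
}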
-
- // Handle store cache miss, replacing the ic with the generic stub.
- __ bind(&miss_force_generic);
- Handle<Code> ic_force_generic =
- masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
- __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
- __ bind(&restore_key_transition_elements_kind);
- // Restore smi-tagging of rcx.
- __ Integer32ToSmi(rcx, rcx);
- __ bind(&transition_elements_kind);
- Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
- if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
- // Grow the array by a single element if possible.
- __ bind(&grow);
-
- // Make sure the array is only growing by a single element; anything else
- // must be handled by the runtime. Flags are already set by the previous
- // compare.
- __ j(not_equal, &miss_force_generic);
-
- // Transition on values that can't be stored in a FixedDoubleArray.
- Label value_is_smi;
- __ JumpIfSmi(rax, &value_is_smi);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &transition_elements_kind);
- __ bind(&value_is_smi);
-
- // Check for the empty array, and preallocate a small backing store if
- // possible.
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, &check_capacity);
-
- int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Initialize the new FixedDoubleArray. Leave the elements uninitialized for
- // efficiency; they are guaranteed to be initialized before use.
- __ Move(FieldOperand(rdi, JSObject::kMapOffset),
- masm->isolate()->factory()->fixed_double_array_map());
- __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
- Smi::FromInt(JSArray::kPreallocatedArrayElements));
-
- // Store the element at index zero (the key is zero for the empty-array case).
- __ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &restore_key_transition_elements_kind);
-
- __ movq(r8, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- __ movq(FieldOperand(rdi, FixedDoubleArray::OffsetOfElementAt(i)), r8);
- }
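The fill loop above writes kHoleNanInt64 into every preallocated slot: holes in a FixedDoubleArray are encoded as one specific NaN bit pattern that canonicalized user values can never produce, so a plain bit comparison identifies them. A hedged sketch follows; the exact constant is an assumption based on V8 of this vintage.

#include <cstdint>
#include <cstring>

const uint64_t kHoleNanInt64 = 0x7FF7FFFFFFFFFFFFULL;  // assumed hole pattern

inline bool IsTheHole(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // compare raw bits; NaN != NaN as doubles
  return bits == kHoleNanInt64;
}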
-
- // Install the new backing store in the JSArray.
- __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
- __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Increment the length of the array.
- __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
- __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ ret(0);
-
- __ bind(&check_capacity);
- // rax: value
- // rcx: key
- // rdx: receiver
- // rdi: elements
- // Make sure that the backing store can hold additional elements.
- __ cmpq(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &slow);
-
- // Grow the array and finish the store.
- __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
- Smi::FromInt(1));
- __ jmp(&finish_store);
-
- __ bind(&slow);
- Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
- __ jmp(ic_slow, RelocInfo::CODE_TARGET);
- }
-}
-
-
-#undef __
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_X64