Diffstat (limited to 'src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp')
 -rw-r--r--  src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp | 499
 1 file changed, 84 insertions(+), 415 deletions(-)
diff --git a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp
index a0e462b612..bf3a4180cc 100644
--- a/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp
+++ b/src/3rdparty/javascriptcore/JavaScriptCore/jit/JIT.cpp
@@ -37,6 +37,7 @@ JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
+#include "JITStubs.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
@@ -79,51 +80,67 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
, m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
, m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
, m_bytecodeIndex((unsigned)-1)
+#if USE(JSVALUE32_64)
+ , m_jumpTargetIndex(0)
+ , m_mappedBytecodeIndex((unsigned)-1)
+ , m_mappedVirtualRegisterIndex((unsigned)-1)
+ , m_mappedTag((RegisterID)-1)
+ , m_mappedPayload((RegisterID)-1)
+#else
, m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
, m_jumpTargetsPosition(0)
+#endif
{
}
-void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+#if USE(JSVALUE32_64)
+void JIT::emitTimeoutCheck()
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitGetVirtualRegisters(src1, regT0, src2, regT1);
-
- // Jump to a slow case if either operand is a number, or if both are JSCell*s.
- move(regT0, regT2);
- orPtr(regT1, regT2);
- addSlowCase(emitJumpIfJSCell(regT2));
- addSlowCase(emitJumpIfImmediateNumber(regT2));
-
- if (type == OpStrictEq)
- set32(Equal, regT1, regT0, regT0);
- else
- set32(NotEqual, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
-
- emitPutVirtualRegister(dst);
+ Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
+ JITStubCall stubCall(this, cti_timeout_check);
+ stubCall.addArgument(regT1, regT0); // save last result registers.
+ stubCall.call(timeoutCheckRegister);
+ stubCall.getArgument(0, regT1, regT0); // reload last result registers.
+ skipTimeout.link(this);
}
-
+#else
void JIT::emitTimeoutCheck()
{
Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
- JITStubCall(this, JITStubs::cti_timeout_check).call(timeoutCheckRegister);
+ JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
skipTimeout.link(this);
killLastResultRegister();
}
-
+#endif
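
In the JSVALUE32_64 build a JSValue no longer fits in one machine register: it is split into a 32-bit tag and a 32-bit payload, which is why the new emitTimeoutCheck above spills and reloads the regT1/regT0 pair around the stub call, and why the constructor now tracks m_mappedTag/m_mappedPayload. A minimal sketch of the split encoding, with illustrative field names (JSC's real definition also accounts for endianness and specific tag constants, which are not shown in this diff):

    // Sketch only: a 64-bit slot viewed either as a double or as tag + payload.
    union EncodedValueSketch {
        double asDouble;       // non-immediate doubles occupy the whole slot
        struct {
            int32_t payload;   // int32 value, boolean, or low bits of a JSCell*
            int32_t tag;       // type tag selecting the payload's interpretation
        } asBits;
    };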
#define NEXT_OPCODE(name) \
m_bytecodeIndex += OPCODE_LENGTH(name); \
break;
+#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
case name: { \
- JITStubCall stubCall(this, JITStubs::cti_##name); \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand); \
+ stubCall.addArgument(currentInstruction[3].u.operand); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_UNARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand); \
+ stubCall.call(currentInstruction[1].u.operand); \
+ NEXT_OPCODE(name); \
+ }
+
+#else // USE(JSVALUE32_64)
+
+#define DEFINE_BINARY_OP(name) \
+ case name: { \
+ JITStubCall stubCall(this, cti_##name); \
stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
stubCall.call(currentInstruction[1].u.operand); \
@@ -132,11 +149,12 @@ void JIT::emitTimeoutCheck()
#define DEFINE_UNARY_OP(name) \
case name: { \
- JITStubCall stubCall(this, JITStubs::cti_##name); \
+ JITStubCall stubCall(this, cti_##name); \
stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
stubCall.call(currentInstruction[1].u.operand); \
NEXT_OPCODE(name); \
}
+#endif // USE(JSVALUE32_64)
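
Hand-expanding one of these macros shows the shape of the generated case. For the non-JSVALUE32_64 variant, DEFINE_BINARY_OP(op_less) becomes the following (whitespace added; this is a mechanical expansion of the macros above, not code from the file):

    case op_less: {
        JITStubCall stubCall(this, cti_op_less);
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); // lhs via scratch regT2
        stubCall.addArgument(currentInstruction[3].u.operand, regT2); // rhs via scratch regT2
        stubCall.call(currentInstruction[1].u.operand);               // result into dst
        m_bytecodeIndex += OPCODE_LENGTH(op_less);                    // NEXT_OPCODE(op_less)
        break;
    }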
#define DEFINE_OP(name) \
case name: { \
@@ -168,14 +186,18 @@ void JIT::privateCompileMainPass()
sampleInstruction(currentInstruction);
#endif
+#if !USE(JSVALUE32_64)
if (m_labels[m_bytecodeIndex].isUsed())
killLastResultRegister();
-
+#endif
+
m_labels[m_bytecodeIndex] = label();
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
DEFINE_BINARY_OP(op_del_by_val)
+#if !USE(JSVALUE32_64)
DEFINE_BINARY_OP(op_div)
+#endif
DEFINE_BINARY_OP(op_in)
DEFINE_BINARY_OP(op_less)
DEFINE_BINARY_OP(op_lesseq)
@@ -187,7 +209,9 @@ void JIT::privateCompileMainPass()
DEFINE_UNARY_OP(op_is_object)
DEFINE_UNARY_OP(op_is_string)
DEFINE_UNARY_OP(op_is_undefined)
+#if !USE(JSVALUE32_64)
DEFINE_UNARY_OP(op_negate)
+#endif
DEFINE_UNARY_OP(op_typeof)
DEFINE_OP(op_add)
@@ -206,6 +230,9 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_create_arguments)
DEFINE_OP(op_debug)
DEFINE_OP(op_del_by_id)
+#if USE(JSVALUE32_64)
+ DEFINE_OP(op_div)
+#endif
DEFINE_OP(op_end)
DEFINE_OP(op_enter)
DEFINE_OP(op_enter_with_activation)
@@ -236,6 +263,9 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_mod)
DEFINE_OP(op_mov)
DEFINE_OP(op_mul)
+#if USE(JSVALUE32_64)
+ DEFINE_OP(op_negate)
+#endif
DEFINE_OP(op_neq)
DEFINE_OP(op_neq_null)
DEFINE_OP(op_new_array)
@@ -265,7 +295,6 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_put_setter)
DEFINE_OP(op_resolve)
DEFINE_OP(op_resolve_base)
- DEFINE_OP(op_resolve_func)
DEFINE_OP(op_resolve_global)
DEFINE_OP(op_resolve_skip)
DEFINE_OP(op_resolve_with_base)
@@ -322,11 +351,15 @@ void JIT::privateCompileSlowCases()
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
m_propertyAccessInstructionIndex = 0;
+#if USE(JSVALUE32_64)
+ m_globalResolveInfoIndex = 0;
+#endif
m_callLinkInfoIndex = 0;
for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
- // FIXME: enable peephole optimizations for slow cases when applicable
+#if !USE(JSVALUE32_64)
killLastResultRegister();
+#endif
m_bytecodeIndex = iter->to;
#ifndef NDEBUG
@@ -346,6 +379,9 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_construct)
DEFINE_SLOWCASE_OP(op_construct_verify)
DEFINE_SLOWCASE_OP(op_convert_this)
+#if USE(JSVALUE32_64)
+ DEFINE_SLOWCASE_OP(op_div)
+#endif
DEFINE_SLOWCASE_OP(op_eq)
DEFINE_SLOWCASE_OP(op_get_by_id)
DEFINE_SLOWCASE_OP(op_get_by_val)
@@ -358,9 +394,12 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
DEFINE_SLOWCASE_OP(op_loop_if_true)
DEFINE_SLOWCASE_OP(op_lshift)
+ DEFINE_SLOWCASE_OP(op_method_check)
DEFINE_SLOWCASE_OP(op_mod)
DEFINE_SLOWCASE_OP(op_mul)
- DEFINE_SLOWCASE_OP(op_method_check)
+#if USE(JSVALUE32_64)
+ DEFINE_SLOWCASE_OP(op_negate)
+#endif
DEFINE_SLOWCASE_OP(op_neq)
DEFINE_SLOWCASE_OP(op_not)
DEFINE_SLOWCASE_OP(op_nstricteq)
@@ -370,6 +409,9 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_pre_inc)
DEFINE_SLOWCASE_OP(op_put_by_id)
DEFINE_SLOWCASE_OP(op_put_by_val)
+#if USE(JSVALUE32_64)
+ DEFINE_SLOWCASE_OP(op_resolve_global)
+#endif
DEFINE_SLOWCASE_OP(op_rshift)
DEFINE_SLOWCASE_OP(op_stricteq)
DEFINE_SLOWCASE_OP(op_sub)
@@ -396,7 +438,7 @@ void JIT::privateCompileSlowCases()
#endif
}
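
Each SlowCaseEntry records the fast-path jumps taken for one bytecode together with that bytecode's index (iter->to), so the loop above re-points m_bytecodeIndex at the owning instruction before dispatching. A condensed sketch of the pattern; the DEFINE_SLOWCASE_OP expansion and the emitSlow_ helper name are assumptions, since that macro is defined outside this diff:

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
        m_bytecodeIndex = iter->to;  // the bytecode this slow path services
        Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        case op_add: {               // via DEFINE_SLOWCASE_OP(op_add)
            emitSlow_op_add(currentInstruction, iter);  // links jumps, emits slow path, advances iter
            m_bytecodeIndex += OPCODE_LENGTH(op_add);
            break;
        }
        // ... one case per DEFINE_SLOWCASE_OP listed above ...
        }
    }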
-void JIT::privateCompile()
+JITCode JIT::privateCompile()
{
sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
@@ -427,7 +469,7 @@ void JIT::privateCompile()
if (m_codeBlock->codeType() == FunctionCode) {
slowRegisterFileCheck.link(this);
m_bytecodeIndex = 0;
- JITStubCall(this, JITStubs::cti_register_file_check).call();
+ JITStubCall(this, cti_register_file_check).call();
#ifndef NDEBUG
m_bytecodeIndex = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
@@ -510,389 +552,10 @@ void JIT::privateCompile()
info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
}
- m_codeBlock->setJITCode(patchBuffer.finalizeCode());
-}
-
-void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
-{
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- // (1) The first function provides fast property access for array length
- Label arrayLengthBegin = align();
-
- // Check eax is an array
- Jump array_failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump array_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
-
- // Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT0);
- load32(Address(regT0, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT0);
-
- Jump array_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
-
- // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
- emitFastArithIntToImmNoCheck(regT0, regT0);
-
- ret();
-
- // (2) The second function provides fast property access for string length
- Label stringLengthBegin = align();
-
- // Check eax is a string
- Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
-
- // Checks out okay! - get the length from the UString.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSString, m_value) + OBJECT_OFFSETOF(UString, m_rep)), regT0);
- load32(Address(regT0, OBJECT_OFFSETOF(UString::Rep, len)), regT0);
-
- Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
-
- // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
- emitFastArithIntToImmNoCheck(regT0, regT0);
-
- ret();
-#endif
-
- // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
- COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
-
- Label virtualCallPreLinkBegin = align();
-
- // Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
- Jump hasCodeBlock1 = branchTestPtr(NonZero, regT0);
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction1 = call();
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- hasCodeBlock1.link(this);
-
- Jump isNativeFunc1 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay1 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
- restoreArgumentReference();
- Call callArityCheck1 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- arityCheckOkay1.link(this);
- isNativeFunc1.link(this);
-
- compileOpCallInitializeCallFrame();
-
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- restoreArgumentReference();
- Call callDontLazyLinkCall = call();
- emitGetJITStubArg(1, regT2);
- restoreReturnAddressBeforeReturn(regT3);
-
- jump(regT0);
-
- Label virtualCallLinkBegin = align();
-
- // Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
- Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0);
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction2 = call();
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- hasCodeBlock2.link(this);
-
- Jump isNativeFunc2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay2 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
- restoreArgumentReference();
- Call callArityCheck2 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- arityCheckOkay2.link(this);
- isNativeFunc2.link(this);
-
- compileOpCallInitializeCallFrame();
-
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- restoreArgumentReference();
- Call callLazyLinkCall = call();
- restoreReturnAddressBeforeReturn(regT3);
-
- jump(regT0);
-
- Label virtualCallBegin = align();
-
- // Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_code)), regT0);
- Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0);
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction3 = call();
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
- hasCodeBlock3.link(this);
-
- Jump isNativeFunc3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_codeType)), Imm32(NativeCode));
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay3 = branch32(Equal, Address(regT0, OBJECT_OFFSETOF(CodeBlock, m_numParameters)), regT1);
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 2);
- emitPutJITStubArg(regT0, 4);
- restoreArgumentReference();
- Call callArityCheck3 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(1, regT2);
- emitGetJITStubArg(3, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
- arityCheckOkay3.link(this);
- isNativeFunc3.link(this);
-
- // load ctiCode from the new codeBlock.
- loadPtr(Address(regT3, OBJECT_OFFSETOF(FunctionBodyNode, m_jitCode)), regT0);
-
- compileOpCallInitializeCallFrame();
- jump(regT0);
-
-
- Label nativeCallThunk = align();
- preserveReturnAddressAfterCall(regT0);
- emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
-
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-
-#if PLATFORM(X86_64)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86::ecx);
-
- // Allocate stack space for our arglist
- subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
- COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_by_16byte_aligned);
-
- // Set up arguments
- subPtr(Imm32(1), X86::ecx); // Don't include 'this' in argcount
-
- // Push argcount
- storePtr(X86::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
-
- // Calculate the start of the callframe header, and store in edx
- addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86::edx);
-
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx)
- mul32(Imm32(sizeof(Register)), X86::ecx, X86::ecx);
- subPtr(X86::ecx, X86::edx);
-
- // push pointer to arguments
- storePtr(X86::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
-
- // ArgList is passed by reference so is stackPointerRegister
- move(stackPointerRegister, X86::ecx);
-
- // edx currently points to the first argument, edx-sizeof(Register) points to 'this'
- loadPtr(Address(X86::edx, -(int32_t)sizeof(Register)), X86::edx);
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::esi);
-
- move(callFrameRegister, X86::edi);
-
- call(Address(X86::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
-
- addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
-#elif PLATFORM(X86)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
-
- /* We have two structs that we use to describe the stackframe we set up for our
- * call to native code. NativeCallFrameStructure describes how we set up the stack
- * in advance of the call. NativeFunctionCalleeSignature describes the callframe
- * as the native code expects it. We do this as we are using the fastcall calling
- * convention which results in the callee popping its arguments off the stack, but
- * not the rest of the callframe so we need a nice way to ensure we increment the
- * stack pointer by the right amount after the call.
- */
-#if COMPILER(MSVC) || PLATFORM(LINUX)
- struct NativeCallFrameStructure {
- // CallFrame* callFrame; // passed in EDX
- JSObject* callee;
- JSValue thisValue;
- ArgList* argPointer;
- ArgList args;
- JSValue result;
- };
- struct NativeFunctionCalleeSignature {
- JSObject* callee;
- JSValue thisValue;
- ArgList* argPointer;
- };
-#else
- struct NativeCallFrameStructure {
- // CallFrame* callFrame; // passed in ECX
- // JSObject* callee; // passed in EDX
- JSValue thisValue;
- ArgList* argPointer;
- ArgList args;
- };
- struct NativeFunctionCalleeSignature {
- JSValue thisValue;
- ArgList* argPointer;
- };
-#endif
- const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
- // Allocate system stack frame
- subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
-
- // Set up arguments
- subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
-
- // push argcount
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
-
- // Calculate the start of the callframe header, and store in regT1
- addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
-
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
- mul32(Imm32(sizeof(Register)), regT0, regT0);
- subPtr(regT0, regT1);
- storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
-
- // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
-
- // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
- loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
- storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));
-
-#if COMPILER(MSVC) || PLATFORM(LINUX)
- // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);
-
- // Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
- storePtr(X86::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
-
- // Plant callframe
- move(callFrameRegister, X86::edx);
-
- call(Address(X86::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
-
- // JSValue is a non-POD type
- loadPtr(Address(X86::eax), X86::eax);
-#else
- // Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx);
-
- // Plant callframe
- move(callFrameRegister, X86::ecx);
- call(Address(X86::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
-#endif
-
- // We've put a few temporaries on the stack in addition to the actual arguments
- // so pull them off now
- addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
-
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
-#else
- breakpoint();
-#endif
-
- // Check for an exception
- loadPtr(&(globalData->exception), regT2);
- Jump exceptionHandler = branchTestPtr(NonZero, regT2);
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
- ret();
-
- // Handle an exception
- exceptionHandler.link(this);
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
- restoreReturnAddressBeforeReturn(regT2);
- ret();
-
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- Call array_failureCases1Call = makeTailRecursiveCall(array_failureCases1);
- Call array_failureCases2Call = makeTailRecursiveCall(array_failureCases2);
- Call array_failureCases3Call = makeTailRecursiveCall(array_failureCases3);
- Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
- Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
- Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-#endif
-
- // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- patchBuffer.link(array_failureCases1Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
- patchBuffer.link(array_failureCases2Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
- patchBuffer.link(array_failureCases3Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
- patchBuffer.link(string_failureCases1Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases2Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases3Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
-#endif
- patchBuffer.link(callArityCheck1, FunctionPtr(JITStubs::cti_op_call_arityCheck));
- patchBuffer.link(callArityCheck2, FunctionPtr(JITStubs::cti_op_call_arityCheck));
- patchBuffer.link(callArityCheck3, FunctionPtr(JITStubs::cti_op_call_arityCheck));
- patchBuffer.link(callJSFunction1, FunctionPtr(JITStubs::cti_op_call_JSFunction));
- patchBuffer.link(callJSFunction2, FunctionPtr(JITStubs::cti_op_call_JSFunction));
- patchBuffer.link(callJSFunction3, FunctionPtr(JITStubs::cti_op_call_JSFunction));
- patchBuffer.link(callDontLazyLinkCall, FunctionPtr(JITStubs::cti_vm_dontLazyLinkCall));
- patchBuffer.link(callLazyLinkCall, FunctionPtr(JITStubs::cti_vm_lazyLinkCall));
-
- CodeRef finalCode = patchBuffer.finalizeCode();
- *executablePool = finalCode.m_executablePool;
-
- *ctiVirtualCallPreLink = trampolineAt(finalCode, virtualCallPreLinkBegin);
- *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
- *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
- *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- *ctiArrayLengthTrampoline = trampolineAt(finalCode, arrayLengthBegin);
- *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
-#else
- UNUSED_PARAM(ctiArrayLengthTrampoline);
- UNUSED_PARAM(ctiStringLengthTrampoline);
-#endif
+ return patchBuffer.finalizeCode();
}
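
With privateCompile returning the finalized JITCode instead of writing it into the CodeBlock, the caller decides where the generated code is stored. A plausible shape for the public entry point, assuming it wraps privateCompile as declared in JIT.h (the header is not part of this diff):

    // Assumed wrapper; shown for orientation only.
    static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock)
    {
        return JIT(globalData, codeBlock).privateCompile();
    }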
+#if !USE(JSVALUE32_64)
void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
{
loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), dst);
@@ -906,24 +569,29 @@ void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObjec
loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), variableObject);
storePtr(src, Address(variableObject, index * sizeof(Register)));
}
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
{
// When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
// (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
// match). Reset the check so it no longer matches.
- RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock);
+ RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock.get());
+#if USE(JSVALUE32_64)
+ repatchBuffer.repatch(callLinkInfo->hotPathBegin, 0);
+#else
repatchBuffer.repatch(callLinkInfo->hotPathBegin, JSValue::encode(JSValue()));
+#endif
}
void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
- ASSERT(calleeCodeBlock);
RepatchBuffer repatchBuffer(callerCodeBlock);
// Currently we only link calls with the exact number of arguments.
// If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
- if (callerArgCount == calleeCodeBlock->m_numParameters || calleeCodeBlock->codeType() == NativeCode) {
+ if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
ASSERT(!callLinkInfo->isLinked());
if (calleeCodeBlock)
@@ -936,6 +604,7 @@ void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* ca
// patch the call so we do not continue to try to link.
repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualCall());
}
+#endif // ENABLE(JIT_OPTIMIZE_CALL)
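
The relaxed guard in linkCall above encodes that a native callee has no CodeBlock in this revision, so arity can only be checked for JS-to-JS calls. Restated as a standalone predicate (illustrative only; linkCall inlines this test):

    static bool shouldLinkDirectly(CodeBlock* calleeCodeBlock, int callerArgCount)
    {
        if (!calleeCodeBlock)
            return true;  // native call: no CodeBlock, always linkable
        // JS call: only link when the caller passes exactly the declared arity
        return callerArgCount == calleeCodeBlock->m_numParameters;
    }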
} // namespace JSC