author    Simon Hausmann <simon.hausmann@digia.com>  2012-10-17 16:21:14 +0200
committer Simon Hausmann <simon.hausmann@digia.com>  2012-10-17 16:21:14 +0200
commit    8995b83bcbfbb68245f779b64e5517627c6cc6ea (patch)
tree      17985605dab9263cc2444bd4d45f189e142cca7c /Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
parent    b9c9652036d5e9f1e29c574f40bc73a35c81ace6 (diff)
Imported WebKit commit cf4f8fc6f19b0629f51860cb2d4b25e139d07e00 (http://svn.webkit.org/repository/webkit/trunk@131592)
New snapshot that includes the build fixes for Mac OS X 10.6 and earlier as well as the previously cherry-picked changes
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp  370
1 file changed, 249 insertions, 121 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index 05b1e741e..850d5aa74 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -29,6 +29,7 @@
#if ENABLE(DFG_JIT)
#include "Arguments.h"
+#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "LinkBuffer.h"
@@ -56,6 +57,37 @@ SpeculativeJIT::~SpeculativeJIT()
WTF::deleteAllValues(m_slowPathGenerators);
}
+void SpeculativeJIT::emitAllocateJSArray(Structure* structure, GPRReg resultGPR, GPRReg storageGPR, unsigned numElements)
+{
+ ASSERT(hasContiguous(structure->indexingType()));
+
+ GPRTemporary scratch(this);
+ GPRReg scratchGPR = scratch.gpr();
+
+ unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
+
+ JITCompiler::JumpList slowCases;
+ slowCases.append(
+ emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
+ m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
+ emitAllocateBasicJSObject<JSArray, MarkedBlock::None>(
+ TrustedImmPtr(structure), resultGPR, scratchGPR,
+ storageGPR, sizeof(JSArray), slowCases);
+
+ // I'm assuming that two 32-bit stores are better than a 64-bit store.
+ // I have no idea if that's true. And it probably doesn't matter anyway.
+ m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+ m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
+
+ // I want a slow path that also loads out the storage pointer, and that's
+ // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
+ // of work for a very small piece of functionality. :-/
+ addSlowPathGenerator(adoptPtr(
+ new CallArrayAllocatorSlowPathGenerator(
+ slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
+ structure, numElements)));
+}
+
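// A minimal sketch of the storage layout the new emitAllocateJSArray relies on, assuming
// the usual butterfly convention: emitAllocateBasicStorage hands back a pointer to the
// end of the freshly bumped region, the subPtr rewinds it past the element vector so that
// storageGPR ends up pointing just after the IndexingHeader, and the two 32-bit stores
// then fill in that header through negative offsets.
//
//   struct IndexingHeader { uint32_t publicLength; uint32_t vectorLength; }; // assumed field order
//
//   |<-- IndexingHeader -->|<---- vectorLength * sizeof(JSValue) ---->|
//   ^                      ^                                          ^
//   allocation start       storageGPR (butterfly pointer)             end returned by
//                                                                     emitAllocateBasicStorage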
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail)
{
if (!m_compileOkay)
@@ -70,7 +102,7 @@ void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource
speculationCheck(kind, jsValueSource, nodeUse.index(), jumpToFail);
}
-void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail)
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, const MacroAssembler::JumpList& jumpsToFail)
{
ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps();
@@ -78,7 +110,7 @@ void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource
speculationCheck(kind, jsValueSource, nodeIndex, jumpVector[i]);
}
-void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::JumpList& jumpsToFail)
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
speculationCheck(kind, jsValueSource, nodeUse.index(), jumpsToFail);
@@ -190,7 +222,7 @@ void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValu
convertLastOSRExitToForward(valueRecovery);
}
-void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
+void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
{
ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps();
@@ -295,6 +327,66 @@ const TypedArrayDescriptor* SpeculativeJIT::typedArrayDescriptor(Array::Mode arr
}
}
+JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, Array::Mode arrayMode)
+{
+ JITCompiler::JumpList result;
+
+ switch (arrayMode) {
+ case NON_ARRAY_CONTIGUOUS_MODES: {
+ m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
+ result.append(
+ m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ContiguousShape)));
+ break;
+ }
+ case ARRAY_WITH_CONTIGUOUS_MODES: {
+ m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
+ result.append(
+ m_jit.branch32(
+ MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ContiguousShape)));
+ break;
+ }
+ case NON_ARRAY_ARRAY_STORAGE_MODES: {
+ m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
+ if (isSlowPutAccess(arrayMode)) {
+ m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
+ result.append(
+ m_jit.branch32(
+ MacroAssembler::Above, tempGPR,
+ TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
+ } else {
+ result.append(
+ m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
+ }
+ break;
+ }
+ case Array::ArrayWithArrayStorage:
+ case Array::ArrayWithArrayStorageToHole:
+ case Array::ArrayWithArrayStorageOutOfBounds: {
+ m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
+ result.append(
+ m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
+ break;
+ }
+ case Array::ArrayWithSlowPutArrayStorage: {
+ result.append(
+ m_jit.branchTest32(
+ MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
+ m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
+ m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
+ result.append(
+ m_jit.branch32(
+ MacroAssembler::Above, tempGPR,
+ TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
+ break;
+ }
+ default:
+ CRASH();
+ break;
+ }
+
+ return result;
+}
+
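// The slow-put cases above lean on the usual unsigned range-check trick: subtract the
// lower bound, then a single unsigned comparison tests membership in a contiguous run of
// shapes. A minimal C sketch of the test being emitted, assuming ArrayStorageShape and
// SlowPutArrayStorageShape are adjacent indexing-shape values:
//
//   static bool hasSomeArrayStorage(uint32_t indexingType)
//   {
//       uint32_t shape = indexingType & IndexingShapeMask;
//       // Unsigned wrap-around also rejects shapes below ArrayStorageShape.
//       return shape - ArrayStorageShape <= SlowPutArrayStorageShape - ArrayStorageShape;
//   }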
void SpeculativeJIT::checkArray(Node& node)
{
ASSERT(modeIsSpecific(node.arrayMode()));
@@ -315,39 +407,18 @@ void SpeculativeJIT::checkArray(Node& node)
case Array::String:
expectedClassInfo = &JSString::s_info;
break;
- case NON_ARRAY_ARRAY_STORAGE_MODES: {
- GPRTemporary temp(this);
- m_jit.loadPtr(
- MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
- speculationCheck(
- Uncountable, JSValueRegs(), NoNode,
- m_jit.branchTest8(
- MacroAssembler::Zero,
- MacroAssembler::Address(temp.gpr(), Structure::indexingTypeOffset()),
- MacroAssembler::TrustedImm32(
- isSlowPutAccess(node.arrayMode()) ? (HasArrayStorage | HasSlowPutArrayStorage) : HasArrayStorage)));
-
- noResult(m_compileIndex);
- return;
- }
+ case NON_ARRAY_CONTIGUOUS_MODES:
+ case ARRAY_WITH_CONTIGUOUS_MODES:
+ case NON_ARRAY_ARRAY_STORAGE_MODES:
case ARRAY_WITH_ARRAY_STORAGE_MODES: {
GPRTemporary temp(this);
GPRReg tempGPR = temp.gpr();
m_jit.loadPtr(
MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
- // FIXME: This can be turned into a single branch. But we currently have no evidence
- // that doing so would be profitable, nor do I feel comfortable with the present test
- // coverage for this code path.
speculationCheck(
Uncountable, JSValueRegs(), NoNode,
- m_jit.branchTest32(
- MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
- speculationCheck(
- Uncountable, JSValueRegs(), NoNode,
- m_jit.branchTest32(
- MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(
- isSlowPutAccess(node.arrayMode()) ? (HasArrayStorage | HasSlowPutArrayStorage) : HasArrayStorage)));
+ jumpSlowForUnwantedArrayMode(tempGPR, node.arrayMode()));
noResult(m_compileIndex);
return;
@@ -384,78 +455,122 @@ void SpeculativeJIT::checkArray(Node& node)
noResult(m_compileIndex);
}
-void SpeculativeJIT::arrayify(Node& node)
+void SpeculativeJIT::arrayify(Node& node, GPRReg baseReg, GPRReg propertyReg)
{
- ASSERT(modeIsSpecific(node.arrayMode()));
- ASSERT(!modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode()));
-
- SpeculateCellOperand base(this, node.child1());
- GPRReg baseReg = base.gpr();
+ Array::Mode desiredArrayMode;
switch (node.arrayMode()) {
- case EFFECTFUL_NON_ARRAY_ARRAY_STORAGE_MODES: {
- GPRTemporary structure(this);
- GPRTemporary temp(this);
- GPRReg structureGPR = structure.gpr();
- GPRReg tempGPR = temp.gpr();
-
- m_jit.loadPtr(
- MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
+ case Array::ToContiguous:
+ desiredArrayMode = Array::Contiguous;
+ break;
+ case Array::ToArrayStorage:
+ desiredArrayMode = Array::ArrayStorage;
+ break;
+ case Array::ToSlowPutArrayStorage:
+ desiredArrayMode = Array::SlowPutArrayStorage;
+ break;
+ case Array::ArrayToArrayStorage:
+ desiredArrayMode = Array::ArrayWithArrayStorage;
+ break;
+ case Array::PossiblyArrayToArrayStorage:
+ desiredArrayMode = Array::PossiblyArrayWithArrayStorage;
+ break;
+ default:
+ CRASH();
+ desiredArrayMode = Array::Undecided;
+ break;
+ }
+
+ GPRTemporary structure(this);
+ GPRTemporary temp(this);
+ GPRReg structureGPR = structure.gpr();
+ GPRReg tempGPR = temp.gpr();
- // We can skip all that comes next if we already have array storage.
- IndexingType desiredIndexingTypeMask =
- isSlowPutAccess(node.arrayMode()) ? (HasArrayStorage | HasSlowPutArrayStorage) : HasArrayStorage;
- MacroAssembler::Jump slowCase = m_jit.branchTest8(
- MacroAssembler::Zero,
- MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()),
- MacroAssembler::TrustedImm32(desiredIndexingTypeMask));
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
+
+ m_jit.load8(
+ MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);
- m_jit.loadPtr(
- MacroAssembler::Address(baseReg, JSObject::butterflyOffset()), tempGPR);
+ // We can skip all that comes next if we already have array storage.
+ MacroAssembler::JumpList slowCases =
+ jumpSlowForUnwantedArrayMode(tempGPR, desiredArrayMode);
- MacroAssembler::Jump done = m_jit.jump();
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, JSObject::butterflyOffset()), tempGPR);
- slowCase.link(&m_jit);
+ MacroAssembler::Jump done = m_jit.jump();
- // Next check that the object does not intercept indexed accesses. If it does,
- // then this mode won't work.
+ slowCases.link(&m_jit);
+
+ // If we're allegedly creating contiguous storage and the index is bogus, then
+ // just don't.
+ if (node.arrayMode() == Array::ToContiguous && propertyReg != InvalidGPRReg) {
speculationCheck(
Uncountable, JSValueRegs(), NoNode,
- m_jit.branchTest8(
- MacroAssembler::NonZero,
- MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
- MacroAssembler::TrustedImm32(InterceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero)));
+ m_jit.branch32(
+ MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(MIN_SPARSE_ARRAY_INDEX)));
+ }
+
+ // Next check that the object does not intercept indexed accesses. If it does,
+ // then this mode won't work.
+ speculationCheck(
+ Uncountable, JSValueRegs(), NoNode,
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(InterceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero)));
- // Now call out to create the array storage.
- silentSpillAllRegisters(tempGPR);
+ // Now call out to create the array storage.
+ silentSpillAllRegisters(tempGPR);
+ switch (node.arrayMode()) {
+ case ALL_EFFECTFUL_CONTIGUOUS_MODES:
+ callOperation(operationEnsureContiguous, tempGPR, baseReg);
+ break;
+ case ALL_EFFECTFUL_ARRAY_STORAGE_MODES:
callOperation(operationEnsureArrayStorage, tempGPR, baseReg);
- silentFillAllRegisters(tempGPR);
-
- // Alas, we need to reload the structure because silent spilling does not save
- // temporaries. Nor would it be useful for it to do so. Either way we're talking
- // about a load.
- m_jit.loadPtr(
- MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
-
- // Finally, check that we have the kind of array storage that we wanted to get.
- // Note that this is a backwards speculation check, which will result in the
- // bytecode operation corresponding to this arrayification being reexecuted.
- // That's fine, since arrayification is not user-visible.
- speculationCheck(
- Uncountable, JSValueRegs(), NoNode,
- m_jit.branchTest8(
- MacroAssembler::Zero,
- MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()),
- MacroAssembler::TrustedImm32(desiredIndexingTypeMask)));
-
- done.link(&m_jit);
- storageResult(tempGPR, m_compileIndex);
break;
- }
default:
- ASSERT_NOT_REACHED();
+ CRASH();
break;
}
+ silentFillAllRegisters(tempGPR);
+
+ // Alas, we need to reload the structure because silent spilling does not save
+ // temporaries. Nor would it be useful for it to do so. Either way we're talking
+ // about a load.
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
+
+ // Finally, check that we have the kind of array storage that we wanted to get.
+ // Note that this is a backwards speculation check, which will result in the
+ // bytecode operation corresponding to this arrayification being reexecuted.
+ // That's fine, since arrayification is not user-visible.
+ m_jit.load8(
+ MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), structureGPR);
+ speculationCheck(
+ Uncountable, JSValueRegs(), NoNode,
+ jumpSlowForUnwantedArrayMode(structureGPR, desiredArrayMode));
+
+ done.link(&m_jit);
+ storageResult(tempGPR, m_compileIndex);
+}
+
+void SpeculativeJIT::arrayify(Node& node)
+{
+ ASSERT(modeIsSpecific(node.arrayMode()));
+ ASSERT(!modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode()));
+
+ SpeculateCellOperand base(this, node.child1());
+
+ if (!node.child2()) {
+ arrayify(node, base.gpr(), InvalidGPRReg);
+ return;
+ }
+
+ SpeculateIntegerOperand property(this, node.child2());
+
+ arrayify(node, base.gpr(), property.gpr());
}
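// Roughly, the rewritten arrayify compiles to the shape below (a sketch only;
// operationEnsureContiguous and operationEnsureArrayStorage are the runtime calls the
// diff itself names, everything else is pseudocode):
//
//   if (structure already has the desired indexing shape) {
//       storage = base->butterfly();                        // fast path
//   } else {
//       if (converting to contiguous && property >= MIN_SPARSE_ARRAY_INDEX)
//           speculation failure;                            // don't build huge contiguous storage
//       if (structure intercepts indexed accesses)
//           speculation failure;
//       storage = operationEnsureContiguous(base);          // or operationEnsureArrayStorage
//       if (the new structure still lacks the desired shape)
//           speculation failure;                            // backwards check; the bytecode op re-executes
//   }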
GPRReg SpeculativeJIT::fillStorage(NodeIndex nodeIndex)
@@ -1367,7 +1482,7 @@ void SpeculativeJIT::compile(BasicBlock& block)
ASSERT(m_arguments.size() == block.variablesAtHead.numberOfArguments());
for (size_t i = 0; i < m_arguments.size(); ++i) {
- ValueSource valueSource = ValueSource(ValueInRegisterFile);
+ ValueSource valueSource = ValueSource(ValueInJSStack);
m_arguments[i] = valueSource;
m_stream->appendAndLog(VariableEvent::setLocal(argumentToOperand(i), valueSource.dataFormat()));
}
@@ -1384,11 +1499,11 @@ void SpeculativeJIT::compile(BasicBlock& block)
else if (at(nodeIndex).variableAccessData()->isArgumentsAlias())
valueSource = ValueSource(ArgumentsSource);
else if (at(nodeIndex).variableAccessData()->isCaptured())
- valueSource = ValueSource(ValueInRegisterFile);
+ valueSource = ValueSource(ValueInJSStack);
else if (!at(nodeIndex).refCount())
valueSource = ValueSource(SourceIsDead);
else if (at(nodeIndex).variableAccessData()->shouldUseDoubleFormat())
- valueSource = ValueSource(DoubleInRegisterFile);
+ valueSource = ValueSource(DoubleInJSStack);
else
valueSource = ValueSource::forSpeculation(at(nodeIndex).variableAccessData()->argumentAwarePrediction());
m_variables[i] = valueSource;
@@ -1440,25 +1555,25 @@ void SpeculativeJIT::compile(BasicBlock& block)
for (int i = 0; i < argumentCountIncludingThis; ++i) {
ValueRecovery recovery;
if (codeBlock->isCaptured(argumentToOperand(i)))
- recovery = ValueRecovery::alreadyInRegisterFile();
+ recovery = ValueRecovery::alreadyInJSStack();
else {
ArgumentPosition& argumentPosition =
m_jit.graph().m_argumentPositions[argumentPositionStart + i];
ValueSource valueSource;
if (argumentPosition.shouldUseDoubleFormat())
- valueSource = ValueSource(DoubleInRegisterFile);
+ valueSource = ValueSource(DoubleInJSStack);
else if (isInt32Speculation(argumentPosition.prediction()))
- valueSource = ValueSource(Int32InRegisterFile);
+ valueSource = ValueSource(Int32InJSStack);
else if (isCellSpeculation(argumentPosition.prediction()))
- valueSource = ValueSource(CellInRegisterFile);
+ valueSource = ValueSource(CellInJSStack);
else if (isBooleanSpeculation(argumentPosition.prediction()))
- valueSource = ValueSource(BooleanInRegisterFile);
+ valueSource = ValueSource(BooleanInJSStack);
else
- valueSource = ValueSource(ValueInRegisterFile);
+ valueSource = ValueSource(ValueInJSStack);
recovery = computeValueRecoveryFor(valueSource);
}
// The recovery should refer either to something that has already been
- // stored into the register file at the right place, or to a constant,
+ // stored into the stack at the right place, or to a constant,
// since the Arguments code isn't smart enough to handle anything else.
// The exception is the this argument, which we don't really need to be
// able to recover.
@@ -1550,9 +1665,9 @@ void SpeculativeJIT::checkArgumentTypes()
m_codeOriginForOSR = CodeOrigin(0);
for (size_t i = 0; i < m_arguments.size(); ++i)
- m_arguments[i] = ValueSource(ValueInRegisterFile);
+ m_arguments[i] = ValueSource(ValueInJSStack);
for (size_t i = 0; i < m_variables.size(); ++i)
- m_variables[i] = ValueSource(ValueInRegisterFile);
+ m_variables[i] = ValueSource(ValueInJSStack);
for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
NodeIndex nodeIndex = m_jit.graph().m_arguments[i];
@@ -1649,7 +1764,7 @@ void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
{
- if (valueSource.isInRegisterFile())
+ if (valueSource.isInJSStack())
return valueSource.valueRecovery();
ASSERT(valueSource.kind() == HaveNode);
@@ -1942,16 +2057,17 @@ void SpeculativeJIT::compileUInt32ToNumber(Node& node)
}
IntegerOperand op1(this, node.child1());
- GPRTemporary result(this, op1);
+ GPRTemporary result(this); // For the benefit of OSR exit, force these to be in different registers. In reality the OSR exit compiler could find cases where you have uint32(%r1) followed by int32(%r1) and then use different registers, but that seems like too much effort.
+
+ m_jit.move(op1.gpr(), result.gpr());
// Test the operand is positive. This is a very special speculation check - we actually
// use roll-forward speculation here, where if this fails, we jump to the baseline
// instruction that follows us, rather than the one we're executing right now. We have
// to do this because by this point, the original values necessary to compile whatever
// operation the UInt32ToNumber originated from might be dead.
- forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, op1.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(op1.gpr()));
+ forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(result.gpr()));
- m_jit.move(op1.gpr(), result.gpr());
integerResult(result.gpr(), m_compileIndex, op1.format());
}
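// For reference, the semantics being speculated on here, as a sketch: a uint32 whose
// sign bit is set cannot be returned as an int32, so the fast path bails out with a
// roll-forward exit that materializes the unsigned value.
//
//   double uint32ToNumber(int32_t bits) // bits holds the raw uint32
//   {
//       return static_cast<uint32_t>(bits); // values >= 2^31 force a double result
//   }
//
// Copying op1 into result before the check presumably lets the recovery (uint32InGPR)
// name a register whose lifetime is independent of op1, which is what the comment about
// OSR exit above is driving at.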
@@ -2140,7 +2256,7 @@ void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor&
m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
break;
default:
- ASSERT_NOT_REACHED();
+ CRASH();
}
if (elementSize < 4 || signedness == SignedTypedArray) {
integerResult(resultReg, m_compileIndex);
@@ -2250,7 +2366,7 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor&
m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
break;
default:
- ASSERT_NOT_REACHED();
+ CRASH();
}
if (node.op() == PutByVal)
outOfBounds.link(&m_jit);
@@ -3205,12 +3321,23 @@ void SpeculativeJIT::compileGetArrayLength(Node& node)
const TypedArrayDescriptor* descriptor = typedArrayDescriptor(node.arrayMode());
switch (node.arrayMode()) {
- case ARRAY_WITH_ARRAY_STORAGE_MODES: {
+ case ARRAY_WITH_CONTIGUOUS_MODES: {
+ StorageOperand storage(this, node.child2());
+ GPRTemporary result(this, storage);
+ GPRReg storageReg = storage.gpr();
+ GPRReg resultReg = result.gpr();
+ m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);
+
+ integerResult(resultReg, m_compileIndex);
+ break;
+ }
+ case ARRAY_WITH_ARRAY_STORAGE_MODES:
+ case ARRAY_EFFECTFUL_MODES: {
StorageOperand storage(this, node.child2());
GPRTemporary result(this, storage);
GPRReg storageReg = storage.gpr();
GPRReg resultReg = result.gpr();
- m_jit.load32(MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()), resultReg);
+ m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);
speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, resultReg, MacroAssembler::TrustedImm32(0)));
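// The branch here guards the mismatch between the uint32 publicLength and the int32 the
// DFG wants to hand back: a length with the top bit set must take the slow route, roughly
//
//   if (static_cast<int32_t>(length) < 0) speculation failure;
//
// The new contiguous case above omits that check, presumably because contiguous
// butterflies are never grown to lengths anywhere near 2^31.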
@@ -3328,14 +3455,11 @@ void SpeculativeJIT::compileAllocatePropertyStorage(Node& node)
ASSERT(!node.structureTransitionData().previousStructure->outOfLineCapacity());
ASSERT(initialOutOfLineCapacity == node.structureTransitionData().newStructure->outOfLineCapacity());
- size_t newSize = initialOutOfLineCapacity * sizeof(JSValue);
- CopiedAllocator* copiedAllocator = &m_jit.globalData()->heap.storageAllocator();
-
- m_jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
- JITCompiler::Jump slowPath = m_jit.branchSubPtr(JITCompiler::Signed, JITCompiler::TrustedImm32(newSize), scratchGPR);
- m_jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
- m_jit.negPtr(scratchGPR);
- m_jit.addPtr(JITCompiler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
+
+ JITCompiler::Jump slowPath =
+ emitAllocateBasicStorage(
+ TrustedImm32(initialOutOfLineCapacity * sizeof(JSValue)), scratchGPR);
+
m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(JSValue)), scratchGPR);
addSlowPathGenerator(
@@ -3376,15 +3500,9 @@ void SpeculativeJIT::compileReallocatePropertyStorage(Node& node)
GPRReg scratchGPR1 = scratch1.gpr();
GPRReg scratchGPR2 = scratch2.gpr();
- JITCompiler::Jump slowPath;
-
- CopiedAllocator* copiedAllocator = &m_jit.globalData()->heap.storageAllocator();
-
- m_jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR2);
- slowPath = m_jit.branchSubPtr(JITCompiler::Signed, JITCompiler::TrustedImm32(newSize), scratchGPR2);
- m_jit.storePtr(scratchGPR2, &copiedAllocator->m_currentRemaining);
- m_jit.negPtr(scratchGPR2);
- m_jit.addPtr(JITCompiler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR2);
+ JITCompiler::Jump slowPath =
+ emitAllocateBasicStorage(TrustedImm32(newSize), scratchGPR2);
+
m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(JSValue)), scratchGPR2);
addSlowPathGenerator(
@@ -3399,6 +3517,16 @@ void SpeculativeJIT::compileReallocatePropertyStorage(Node& node)
storageResult(scratchGPR2, m_compileIndex);
}
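// Both call sites above now defer to emitAllocateBasicStorage. Judging from the inline
// code it replaces, the helper bump-allocates out of the CopiedAllocator and returns a
// pointer one past the end of the new block, which is why each caller immediately adds
// sizeof(JSValue). A minimal C++ sketch of that behaviour (field names taken from the
// removed code; the helper's real signature may differ):
//
//   struct CopiedAllocator { size_t m_currentRemaining; char* m_currentPayloadEnd; };
//
//   // Returns null when the block is exhausted (the JIT branches to a slow path instead).
//   char* allocateBasicStorage(CopiedAllocator& allocator, size_t bytes)
//   {
//       if (allocator.m_currentRemaining < bytes)
//           return nullptr;
//       allocator.m_currentRemaining -= bytes;
//       return allocator.m_currentPayloadEnd - allocator.m_currentRemaining; // one past the new block
//   }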
+GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, Array::Mode arrayMode)
+{
+ if (!putByValWillNeedExtraRegister(arrayMode))
+ return InvalidGPRReg;
+
+ GPRTemporary realTemporary(this);
+ temporary.adopt(realTemporary);
+ return temporary.gpr();
+}
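// A plausible call-site pattern for the helper above (a sketch; the PutByVal code itself
// is outside this hunk). The temporary is only claimed when the array mode actually needs
// it, so simple contiguous stores don't tie up an extra register:
//
//   GPRTemporary temporary;
//   GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node.arrayMode());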
+
} } // namespace JSC::DFG
#endif