author     Shawn Rutledge <shawn.rutledge@qt.io>  2017-02-02 18:40:56 +0100
committer  Shawn Rutledge <shawn.rutledge@qt.io>  2017-02-02 18:43:20 +0100
commit     52d5131ce52ec687d5c35219b8096a2f42f8d557 (patch)
tree       4b54b410bbd1efde297e6fee06bdd75809855b8c
parent     9b5fc80af28580e9672792dd511d876a93947882 (diff)
parent     b63393c7aac7c337672b87a914cb059f60091584 (diff)
Merge remote-tracking branch 'origin/dev' into wip/pointerhandler
-rw-r--r--  .qmake.conf  2
-rw-r--r--  examples/quick/demos/stocqt/doc/src/stocqt.qdoc  2
-rw-r--r--  examples/quick/window/CurrentScreen.qml  7
-rw-r--r--  src/3rdparty/masm/assembler/ARMv7Assembler.h  88
-rw-r--r--  src/3rdparty/masm/assembler/AbstractMacroAssembler.h  42
-rw-r--r--  src/3rdparty/masm/assembler/LinkBuffer.cpp  134
-rw-r--r--  src/3rdparty/masm/assembler/LinkBuffer.h  292
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssembler.h  171
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerARM.h  2
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerARM64.h  2
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerARMv7.h  84
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerMIPS.h  2
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerSH4.h  2
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerX86.h  34
-rw-r--r--  src/3rdparty/masm/assembler/MacroAssemblerX86_64.h  29
-rw-r--r--  src/3rdparty/masm/assembler/X86Assembler.h  40
-rw-r--r--  src/3rdparty/masm/masm.pri  1
-rw-r--r--  src/3rdparty/masm/stubs/ExecutableAllocator.h  8
-rw-r--r--  src/3rdparty/masm/wtf/OSAllocatorIntegrity.cpp  232
-rw-r--r--  src/3rdparty/masm/wtf/Platform.h  4
-rw-r--r--  src/3rdparty/masm/yarr/YarrJIT.cpp  6
-rw-r--r--  src/imports/imports.pro  1
-rw-r--r--  src/imports/sharedimage/plugin.cpp  134
-rw-r--r--  src/imports/sharedimage/qmldir  3
-rw-r--r--  src/imports/sharedimage/qsharedimageloader.cpp  265
-rw-r--r--  src/imports/sharedimage/qsharedimageloader_p.h  81
-rw-r--r--  src/imports/sharedimage/sharedimage.pro  17
-rw-r--r--  src/imports/sharedimage/sharedimageprovider.cpp  156
-rw-r--r--  src/imports/sharedimage/sharedimageprovider.h  58
-rw-r--r--  src/plugins/scenegraph/openvg/qsgopenvgglyphnode.cpp  3
-rw-r--r--  src/plugins/scenegraph/openvg/qsgopenvginternalrectanglenode.cpp  6
-rw-r--r--  src/qml/compiler/compiler.pri  8
-rw-r--r--  src/qml/compiler/qqmlirbuilder.cpp  152
-rw-r--r--  src/qml/compiler/qqmlirbuilder_p.h  15
-rw-r--r--  src/qml/compiler/qv4codegen.cpp  4
-rw-r--r--  src/qml/compiler/qv4compileddata.cpp  234
-rw-r--r--  src/qml/compiler/qv4compileddata_p.h  85
-rw-r--r--  src/qml/compiler/qv4instr_moth_p.h  3
-rw-r--r--  src/qml/compiler/qv4isel_moth.cpp  70
-rw-r--r--  src/qml/compiler/qv4isel_moth_p.h  6
-rw-r--r--  src/qml/compiler/qv4jsir_p.h  62
-rw-r--r--  src/qml/compiler/qv4ssa.cpp  14
-rw-r--r--  src/qml/compiler/qv4ssa_p.h  35
-rw-r--r--  src/qml/jit/qv4assembler.cpp  213
-rw-r--r--  src/qml/jit/qv4assembler_p.h  890
-rw-r--r--  src/qml/jit/qv4binop.cpp  116
-rw-r--r--  src/qml/jit/qv4binop_p.h  67
-rw-r--r--  src/qml/jit/qv4isel_masm.cpp  1188
-rw-r--r--  src/qml/jit/qv4isel_masm_p.h  73
-rw-r--r--  src/qml/jit/qv4regalloc.cpp  6
-rw-r--r--  src/qml/jit/qv4targetplatform_p.h  269
-rw-r--r--  src/qml/jit/qv4unop.cpp  55
-rw-r--r--  src/qml/jit/qv4unop_p.h  12
-rw-r--r--  src/qml/jsruntime/jsruntime.pri  4
-rw-r--r--  src/qml/jsruntime/qv4engine.cpp  10
-rw-r--r--  src/qml/jsruntime/qv4lookup_p.h  3
-rw-r--r--  src/qml/jsruntime/qv4object.cpp  43
-rw-r--r--  src/qml/jsruntime/qv4object_p.h  7
-rw-r--r--  src/qml/jsruntime/qv4runtime.cpp  34
-rw-r--r--  src/qml/jsruntime/qv4value_p.h  45
-rw-r--r--  src/qml/parser/qqmljskeywords_p.h  2
-rw-r--r--  src/qml/qml/qqmlimport.cpp  154
-rw-r--r--  src/qml/qml/qqmlimport_p.h  42
-rw-r--r--  src/qml/qml/qqmlpropertycache.cpp  5
-rw-r--r--  src/qml/qml/qqmltypeloader.cpp  60
-rw-r--r--  src/qml/qml/qqmltypeloader_p.h  61
-rw-r--r--  src/qml/qml/qqmltypenamecache.cpp  57
-rw-r--r--  src/qml/qml/qqmltypenamecache_p.h  8
-rw-r--r--  src/qmldevtools/qmldevtools.pro  1
-rw-r--r--  src/quick/items/qquickitemgrabresult.cpp  2
-rw-r--r--  src/quick/items/qquicktext.cpp  89
-rw-r--r--  src/quick/items/qquicktext_p.h  5
-rw-r--r--  src/quick/items/qquicktext_p_p.h  1
-rw-r--r--  src/quick/items/qquickwindow.cpp  9
-rw-r--r--  src/quick/scenegraph/coreapi/qsgbatchrenderer_p.h  9
-rw-r--r--  src/quick/scenegraph/qsgcontext_p.h  98
-rw-r--r--  src/quick/scenegraph/scenegraph.pri  2
-rw-r--r--  src/quick/util/qquickpixmapcache.cpp  12
-rw-r--r--  src/quick/util/qquickpixmapcache_p.h  2
-rw-r--r--  src/src.pro  2
-rw-r--r--  tests/auto/auto.pro  2
-rw-r--r--  tests/auto/qml/qml.pro  2
-rw-r--r--  tests/auto/qml/qqmlecmascript/tst_qqmlecmascript.cpp  57
-rw-r--r--  tests/auto/qml/qqmllanguage/data/compositeTypeByName_anon_qmldir.qml  9
-rw-r--r--  tests/auto/qml/qqmllanguage/data/compositeTypeByName_named_qmldir.qml  9
-rw-r--r--  tests/auto/qml/qqmllanguage/data/quickTypeByName_anon.qml  8
-rw-r--r--  tests/auto/qml/qqmllanguage/data/quickTypeByName_named.qml  8
-rw-r--r--  tests/auto/qml/qqmllanguage/data/simpleimportByName/SimpleType.qml  4
-rw-r--r--  tests/auto/qml/qqmllanguage/data/simpleimportByName/qmldir  1
-rw-r--r--  tests/auto/qml/qqmllanguage/tst_qqmllanguage.cpp  29
-rw-r--r--  tests/auto/qmltest/BLACKLIST  2
-rw-r--r--  tests/auto/qmltest/events/tst_drag.qml  6
-rw-r--r--  tests/auto/qmltest/itemgrabber/tst_itemgrabber.qml  41
-rw-r--r--  tests/auto/quick/qquicktext/data/fontInfo.qml  24
-rw-r--r--  tests/auto/quick/qquicktext/tst_qquicktext.cpp  19
-rw-r--r--  tools/qmlcachegen/qmlcache.prf  12
-rw-r--r--  tools/qmlcachegen/qmlcachegen.cpp  309
-rw-r--r--  tools/qmlcachegen/qmlcachegen.pro  24
-rw-r--r--  tools/qmljs/qmljs.cpp  2
-rw-r--r--  tools/tools.pro  3
100 files changed, 4904 insertions, 1919 deletions
diff --git a/.qmake.conf b/.qmake.conf
index f03d05c7ac..4e4a28b8f9 100644
--- a/.qmake.conf
+++ b/.qmake.conf
@@ -1,4 +1,4 @@
load(qt_build_config)
CONFIG += warning_clean
-MODULE_VERSION = 5.9.0
+MODULE_VERSION = 5.10.0
diff --git a/examples/quick/demos/stocqt/doc/src/stocqt.qdoc b/examples/quick/demos/stocqt/doc/src/stocqt.qdoc
index bd204c8211..800bba570c 100644
--- a/examples/quick/demos/stocqt/doc/src/stocqt.qdoc
+++ b/examples/quick/demos/stocqt/doc/src/stocqt.qdoc
@@ -69,7 +69,7 @@
\quotefromfile demos/stocqt/content/StockChart.qml
\skipto Rectangle
- \printuntil height
+ \printuntil id
\dots
\skipto Canvas
\printuntil id
diff --git a/examples/quick/window/CurrentScreen.qml b/examples/quick/window/CurrentScreen.qml
index c65baab1f4..09fbce9a74 100644
--- a/examples/quick/window/CurrentScreen.qml
+++ b/examples/quick/window/CurrentScreen.qml
@@ -101,5 +101,12 @@ Item {
Shared.Label { text: "primary orientation" }
Shared.Label { text: orientationToString(Screen.primaryOrientation) + " (" + Screen.primaryOrientation + ")" }
//! [screen]
+
+ Shared.Label { text: "10mm rectangle" }
+ Rectangle {
+ color: "red"
+ width: Screen.pixelDensity * 10
+ height: width
+ }
}
}
diff --git a/src/3rdparty/masm/assembler/ARMv7Assembler.h b/src/3rdparty/masm/assembler/ARMv7Assembler.h
index f0fa07a1bf..6b32fbf487 100644
--- a/src/3rdparty/masm/assembler/ARMv7Assembler.h
+++ b/src/3rdparty/masm/assembler/ARMv7Assembler.h
@@ -27,10 +27,11 @@
#ifndef ARMAssembler_h
#define ARMAssembler_h
-#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+#if ENABLE(ASSEMBLER) && (CPU(ARM_THUMB2) || defined(V4_BOOTSTRAP))
#include "AssemblerBuffer.h"
#include "MacroAssemblerCodeRef.h"
+#include "AbstractMacroAssembler.h"
#include <wtf/Assertions.h>
#include <wtf/Vector.h>
#include <stdint.h>
@@ -491,8 +492,8 @@ public:
private:
union {
struct RealTypes {
- intptr_t m_from : 31;
- intptr_t m_to : 31;
+ int32_t m_from : 31;
+ int32_t m_to : 31;
JumpType m_type : 8;
JumpLinkType m_linkType : 8;
Condition m_condition : 16;
@@ -510,6 +511,56 @@ public:
{
}
+
+ // Jump:
+ //
+ // A jump object is a reference to a jump instruction that has been planted
+ // into the code buffer - it is typically used to link the jump, setting the
+ // relative offset such that when executed it will jump to the desired
+ // destination.
+ template <typename LabelType>
+ class Jump {
+ template<class TemplateAssemblerType> friend class AbstractMacroAssembler;
+ friend class Call;
+ template <typename, template <typename> class> friend class LinkBufferBase;
+ public:
+ Jump()
+ {
+ }
+
+ // Fixme: this information should be stored in the instruction stream, not in the Jump object.
+ Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
+ : m_label(jmp)
+ , m_type(type)
+ , m_condition(condition)
+ {
+ }
+
+ LabelType label() const
+ {
+ LabelType result;
+ result.m_label = m_label;
+ return result;
+ }
+
+ void link(AbstractMacroAssembler<ARMv7Assembler>* masm) const
+ {
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+ }
+
+ void linkTo(LabelType label, AbstractMacroAssembler<ARMv7Assembler>* masm) const
+ {
+ masm->m_assembler.linkJump(m_label, label.label(), m_type, m_condition);
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+
+ private:
+ AssemblerLabel m_label;
+ ARMv7Assembler::JumpType m_type;
+ ARMv7Assembler::Condition m_condition;
+ };
+
private:
// ARMv7, Appx-A.6.3
@@ -2115,6 +2166,7 @@ public:
linkJumpAbsolute(location, to);
}
+#if !defined(V4_BOOTSTRAP)
static void linkCall(void* code, AssemblerLabel from, void* to)
{
ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
@@ -2123,12 +2175,14 @@ public:
setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false);
}
+#endif
static void linkPointer(void* code, AssemblerLabel where, void* value)
{
setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
}
+#if !defined(V4_BOOTSTRAP)
static void relinkJump(void* from, void* to)
{
ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
@@ -2146,11 +2200,12 @@ public:
setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
}
-
+
static void* readCallTarget(void* from)
{
return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
}
+#endif
static void repatchInt32(void* where, int32_t value)
{
@@ -2179,6 +2234,7 @@ public:
cacheFlush(location, sizeof(uint16_t) * 2);
}
+#if !defined(V4_BOOTSTRAP)
static void repatchPointer(void* where, void* value)
{
ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
@@ -2190,7 +2246,8 @@ public:
{
return reinterpret_cast<void*>(readInt32(where));
}
-
+#endif
+
static void replaceWithJump(void* instructionStart, void* to)
{
ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
@@ -2264,7 +2321,7 @@ public:
unsigned debugOffset() { return m_formatter.debugOffset(); }
-#if OS(LINUX)
+#if OS(LINUX) && !defined(V4_BOOTSTRAP)
static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
{
asm volatile(
@@ -2284,7 +2341,10 @@ public:
static void cacheFlush(void* code, size_t size)
{
-#if OS(IOS)
+#if defined(V4_BOOTSTRAP)
+ UNUSED_PARAM(code)
+ UNUSED_PARAM(size)
+#elif OS(IOS)
sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
#elif OS(LINUX)
size_t page = pageSize();
@@ -2430,7 +2490,9 @@ private:
static void setPointer(void* code, void* value, bool flush)
{
- setInt32(code, reinterpret_cast<uint32_t>(value), flush);
+ // ### Deliberate "loss" of precision here. On 64-bit hosts void* is wider
+ // than uint32_t, but the target is 32-bit ARM anyway.
+ setInt32(code, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(value)), flush);
}
static bool isB(void* address)
@@ -2594,6 +2656,11 @@ private:
static void linkBX(uint16_t* instruction, void* target)
{
+#if defined(V4_BOOTSTRAP)
+ UNUSED_PARAM(instruction);
+ UNUSED_PARAM(target);
+ RELEASE_ASSERT_NOT_REACHED();
+#else
// FIXME: this should be up in the MacroAssembler layer. :-(
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
@@ -2606,6 +2673,7 @@ private:
instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+#endif
}
void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
@@ -2638,6 +2706,9 @@ private:
instruction[-3] = OP_NOP_T2b;
linkJumpT4(instruction, target);
} else {
+#if defined(V4_BOOTSTRAP)
+ RELEASE_ASSERT_NOT_REACHED();
+#else
const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
@@ -2646,6 +2717,7 @@ private:
instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+#endif
}
}
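For illustration, here is a minimal, hypothetical sketch of the pattern the ARMv7Assembler change above introduces: the Jump type now lives inside the assembler as a nested template over the macro assembler's Label type, so the V4_BOOTSTRAP build can instantiate it even when the host is not ARM. TinyAssembler and its members are invented names for this sketch only (C++14); they are not part of the patch.

// Hypothetical sketch: an assembler-owned Jump template, mirroring
// ARMv7Assembler::Jump<LabelType> above. None of these names exist in the patch.
#include <cstdio>
#include <vector>

struct AssemblerLabel {
    int offset = -1;
    bool isSet() const { return offset >= 0; }
};

class TinyAssembler {
public:
    struct Label { AssemblerLabel m_label; };   // stand-in for MacroAssembler::Label

    template <typename LabelType>
    class Jump {
    public:
        Jump() = default;
        explicit Jump(AssemblerLabel jmp) : m_label(jmp) {}
        LabelType label() const { LabelType result; result.m_label = m_label; return result; }
        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };

    // Plant a placeholder jump instruction and return a label referring to it.
    AssemblerLabel emitJump()
    {
        m_code.push_back(0xE0);
        return AssemblerLabel{ int(m_code.size()) - 1 };
    }

private:
    std::vector<unsigned char> m_code;
};

int main()
{
    TinyAssembler masm;
    TinyAssembler::Jump<TinyAssembler::Label> jump(masm.emitJump());
    std::printf("jump planted at offset %d\n", jump.label().m_label.offset);
    return 0;
}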
diff --git a/src/3rdparty/masm/assembler/AbstractMacroAssembler.h b/src/3rdparty/masm/assembler/AbstractMacroAssembler.h
index e90dd235c6..4f27e85c98 100644
--- a/src/3rdparty/masm/assembler/AbstractMacroAssembler.h
+++ b/src/3rdparty/masm/assembler/AbstractMacroAssembler.h
@@ -47,7 +47,10 @@
namespace JSC {
class JumpReplacementWatchpoint;
-class LinkBuffer;
+template <typename, template <typename> class>
+class LinkBufferBase;
+template <typename>
+class BranchCompactingLinkBuffer;
class RepatchBuffer;
class Watchpoint;
namespace DFG {
@@ -63,7 +66,9 @@ public:
typedef MacroAssemblerCodePtr CodePtr;
typedef MacroAssemblerCodeRef CodeRef;
+#if !CPU(ARM_THUMB2) && !defined(V4_BOOTSTRAP)
class Jump;
+#endif
typedef typename AssemblerType::RegisterID RegisterID;
typedef typename AssemblerType::FPRegisterID FPRegisterID;
@@ -325,7 +330,7 @@ public:
friend class Jump;
friend class JumpReplacementWatchpoint;
friend class MacroAssemblerCodeRef;
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
friend class Watchpoint;
public:
@@ -339,6 +344,8 @@ public:
}
bool isSet() const { return m_label.isSet(); }
+
+ const AssemblerLabel &label() const { return m_label; }
private:
AssemblerLabel m_label;
};
@@ -356,7 +363,7 @@ public:
class ConvertibleLoadLabel {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
public:
ConvertibleLoadLabel()
@@ -380,7 +387,7 @@ public:
class DataLabelPtr {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
public:
DataLabelPtr()
{
@@ -404,7 +411,7 @@ public:
class DataLabel32 {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
public:
DataLabel32()
{
@@ -428,7 +435,7 @@ public:
class DataLabelCompact {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
public:
DataLabelCompact()
{
@@ -448,6 +455,11 @@ public:
AssemblerLabel m_label;
};
+#if CPU(ARM_THUMB2) || defined(V4_BOOTSTRAP)
+ using Jump = typename AssemblerType::template Jump<Label>;
+ friend Jump;
+#endif
+
// Call:
//
// A Call object is a reference to a call instruction that has been planted
@@ -498,18 +510,19 @@ public:
// into the code buffer - it is typically used to link the jump, setting the
// relative offset such that when executed it will jump to the desired
// destination.
+#if !CPU(ARM_THUMB2) && !defined(V4_BOOTSTRAP)
class Jump {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class Call;
friend struct DFG::OSRExit;
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
public:
Jump()
{
}
-#if CPU(ARM_THUMB2)
+#if CPU(ARM_THUMB2) || defined(V4_BOOTSTRAP)
// Fixme: this information should be stored in the instruction stream, not in the Jump object.
Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
: m_label(jmp)
@@ -610,10 +623,11 @@ public:
private:
AssemblerLabel m_label;
-#if CPU(ARM_THUMB2)
+#if CPU(ARM_THUMB2) || defined(V4_BOOTSTRAP)
ARMv7Assembler::JumpType m_type;
ARMv7Assembler::Condition m_condition;
-#elif CPU(ARM64)
+#endif
+#if CPU(ARM64)
ARM64Assembler::JumpType m_type;
ARM64Assembler::Condition m_condition;
bool m_is64Bit;
@@ -624,6 +638,7 @@ public:
SH4Assembler::JumpType m_type;
#endif
};
+#endif
struct PatchableJump {
PatchableJump()
@@ -645,7 +660,7 @@ public:
// A JumpList is a set of Jump objects.
// All jumps in the set will be linked to the same destination.
class JumpList {
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
public:
typedef Vector<Jump, 2> JumpVector;
@@ -819,7 +834,8 @@ protected:
static bool shouldBlindForSpecificArch(uint64_t) { return true; }
#endif
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
+ template <typename> friend class BranchCompactingLinkBuffer;
friend class RepatchBuffer;
static void linkJump(void* code, Jump jump, CodeLocationLabel target)
@@ -867,10 +883,12 @@ protected:
AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
}
+#if !defined(V4_BOOTSTRAP)
static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
{
return AssemblerType::readPointer(dataLabelPtr.dataLocation());
}
+#endif
static void replaceWithLoad(CodeLocationConvertibleLoad label)
{
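The alias added above, "using Jump = typename AssemblerType::template Jump<Label>;", needs both disambiguators because Jump is a member template of a dependent type. A self-contained sketch of that idiom follows, using invented names (FakeAssembler, AbstractAssembler) rather than the real JSC classes.

// Hypothetical sketch: importing a nested template from a template parameter (C++17).
#include <type_traits>

struct FakeAssembler {
    template <typename LabelType>
    struct Jump { LabelType target; };          // nested member template
};

template <typename AssemblerType>
class AbstractAssembler {
public:
    struct Label {};
    // 'typename' because the alias names a dependent type,
    // 'template' because Jump is a dependent member template.
    using Jump = typename AssemblerType::template Jump<Label>;
};

int main()
{
    AbstractAssembler<FakeAssembler>::Jump jump{};
    static_assert(std::is_same_v<decltype(jump.target),
                                 AbstractAssembler<FakeAssembler>::Label>,
                  "Jump is instantiated with the abstract assembler's Label");
    (void)jump;
    return 0;
}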
diff --git a/src/3rdparty/masm/assembler/LinkBuffer.cpp b/src/3rdparty/masm/assembler/LinkBuffer.cpp
index 432a7ee227..74c278135b 100644
--- a/src/3rdparty/masm/assembler/LinkBuffer.cpp
+++ b/src/3rdparty/masm/assembler/LinkBuffer.cpp
@@ -32,140 +32,6 @@
namespace JSC {
-LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly()
-{
- performFinalization();
-
- return CodeRef(m_executableMemory);
-}
-
-LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...)
-{
- ASSERT(Options::showDisassembly() || Options::showDFGDisassembly());
-
- CodeRef result = finalizeCodeWithoutDisassembly();
-
- dataLogF("Generated JIT code for ");
- va_list argList;
- va_start(argList, format);
- WTF::dataLogFV(format, argList);
- va_end(argList);
- dataLogF(":\n");
-
- dataLogF(
-#if OS(WINDOWS)
- " Code at [0x%p, 0x%p):\n",
-#else
- " Code at [%p, %p):\n",
-#endif
- result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size());
- disassemble(result.code(), m_size, " ", WTF::dataFile());
-
- return result;
-}
-
-void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort)
-{
- ASSERT(!m_code);
-#if !ENABLE(BRANCH_COMPACTION)
- m_executableMemory = m_assembler->m_assembler.executableCopy(*m_globalData, ownerUID, effort);
- if (!m_executableMemory)
- return;
- m_code = m_executableMemory->start();
- m_size = m_assembler->m_assembler.codeSize();
- ASSERT(m_code);
-#else
- m_initialSize = m_assembler->m_assembler.codeSize();
- m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, m_initialSize, ownerUID, effort);
- if (!m_executableMemory)
- return;
- m_code = (uint8_t*)m_executableMemory->start();
- ASSERT(m_code);
- ExecutableAllocator::makeWritable(m_code, m_initialSize);
- uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
- uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
- int readPtr = 0;
- int writePtr = 0;
- Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = m_assembler->jumpsToLink();
- unsigned jumpCount = jumpsToLink.size();
- for (unsigned i = 0; i < jumpCount; ++i) {
- int offset = readPtr - writePtr;
- ASSERT(!(offset & 1));
-
- // Copy the instructions from the last jump to the current one.
- size_t regionSize = jumpsToLink[i].from() - readPtr;
- uint16_t* copySource = reinterpret_cast_ptr<uint16_t*>(inData + readPtr);
- uint16_t* copyEnd = reinterpret_cast_ptr<uint16_t*>(inData + readPtr + regionSize);
- uint16_t* copyDst = reinterpret_cast_ptr<uint16_t*>(outData + writePtr);
- ASSERT(!(regionSize % 2));
- ASSERT(!(readPtr % 2));
- ASSERT(!(writePtr % 2));
- while (copySource != copyEnd)
- *copyDst++ = *copySource++;
- m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
- readPtr += regionSize;
- writePtr += regionSize;
-
- // Calculate absolute address of the jump target, in the case of backwards
- // branches we need to be precise, forward branches we are pessimistic
- const uint8_t* target;
- if (jumpsToLink[i].to() >= jumpsToLink[i].from())
- target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
- else
- target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
-
- JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target);
- // Compact branch if we can...
- if (m_assembler->canCompact(jumpsToLink[i].type())) {
- // Step back in the write stream
- int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
- if (delta) {
- writePtr -= delta;
- m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
- }
- }
- jumpsToLink[i].setFrom(writePtr);
- }
- // Copy everything after the last jump
- memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
- m_assembler->recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr);
-
- for (unsigned i = 0; i < jumpCount; ++i) {
- uint8_t* location = outData + jumpsToLink[i].from();
- uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
- m_assembler->link(jumpsToLink[i], location, target);
- }
-
- jumpsToLink.clear();
- m_size = writePtr + m_initialSize - readPtr;
- m_executableMemory->shrink(m_size);
-
-#if DUMP_LINK_STATISTICS
- dumpLinkStatistics(m_code, m_initialSize, m_size);
-#endif
-#if DUMP_CODE
- dumpCode(m_code, m_size);
-#endif
-#endif
-}
-
-void LinkBuffer::performFinalization()
-{
-#ifndef NDEBUG
- ASSERT(!m_completed);
- ASSERT(isValid());
- m_completed = true;
-#endif
-
-#if ENABLE(BRANCH_COMPACTION)
- ExecutableAllocator::makeExecutable(code(), m_initialSize);
-#else
- ASSERT(m_size <= INT_MAX);
- ExecutableAllocator::makeExecutable(code(), static_cast<int>(m_size));
-#endif
- MacroAssembler::cacheFlush(code(), m_size);
-}
-
#if DUMP_LINK_STATISTICS
void LinkBuffer::dumpLinkStatistics(void* code, size_t initializeSize, size_t finalSize)
{
diff --git a/src/3rdparty/masm/assembler/LinkBuffer.h b/src/3rdparty/masm/assembler/LinkBuffer.h
index e1882433c1..3a659a23ce 100644
--- a/src/3rdparty/masm/assembler/LinkBuffer.h
+++ b/src/3rdparty/masm/assembler/LinkBuffer.h
@@ -36,6 +36,7 @@
#include "JITCompilationEffort.h"
#include "MacroAssembler.h"
+#include "Options.h"
#include <wtf/DataLog.h>
#include <wtf/Noncopyable.h>
@@ -43,6 +44,12 @@ namespace JSC {
class JSGlobalData;
+template <typename T>
+struct DefaultExecutableOffsetCalculator {
+ template <typename Assembler>
+ static T applyOffset(Assembler *, T src) { return src; }
+};
+
// LinkBuffer:
//
// This class assists in linking code generated by the macro assembler, once code generation
@@ -57,30 +64,24 @@ class JSGlobalData;
// * The address of a Label pointing into the code may be resolved.
// * The value referenced by a DataLabel may be set.
//
-class LinkBuffer {
- WTF_MAKE_NONCOPYABLE(LinkBuffer);
+template <typename MacroAssembler, template <typename T> class ExecutableOffsetCalculator>
+class LinkBufferBase {
+ WTF_MAKE_NONCOPYABLE(LinkBufferBase);
typedef MacroAssemblerCodeRef CodeRef;
typedef MacroAssemblerCodePtr CodePtr;
- typedef MacroAssembler::Label Label;
- typedef MacroAssembler::Jump Jump;
- typedef MacroAssembler::PatchableJump PatchableJump;
- typedef MacroAssembler::JumpList JumpList;
- typedef MacroAssembler::Call Call;
- typedef MacroAssembler::DataLabelCompact DataLabelCompact;
- typedef MacroAssembler::DataLabel32 DataLabel32;
- typedef MacroAssembler::DataLabelPtr DataLabelPtr;
- typedef MacroAssembler::ConvertibleLoadLabel ConvertibleLoadLabel;
-#if ENABLE(BRANCH_COMPACTION)
- typedef MacroAssembler::LinkRecord LinkRecord;
- typedef MacroAssembler::JumpLinkType JumpLinkType;
-#endif
+ typedef typename MacroAssembler::Label Label;
+ typedef typename MacroAssembler::Jump Jump;
+ typedef typename MacroAssembler::PatchableJump PatchableJump;
+ typedef typename MacroAssembler::JumpList JumpList;
+ typedef typename MacroAssembler::Call Call;
+ typedef typename MacroAssembler::DataLabelCompact DataLabelCompact;
+ typedef typename MacroAssembler::DataLabel32 DataLabel32;
+ typedef typename MacroAssembler::DataLabelPtr DataLabelPtr;
+ typedef typename MacroAssembler::ConvertibleLoadLabel ConvertibleLoadLabel;
public:
- LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+ LinkBufferBase(JSGlobalData& globalData, MacroAssembler* masm, JITCompilationEffort effort = JITCompilationMustSucceed)
: m_size(0)
-#if ENABLE(BRANCH_COMPACTION)
- , m_initialSize(0)
-#endif
, m_code(0)
, m_assembler(masm)
, m_globalData(&globalData)
@@ -89,10 +90,13 @@ public:
, m_effort(effort)
#endif
{
- linkCode(ownerUID, effort);
+#ifdef NDEBUG
+ UNUSED_PARAM(effort)
+#endif
+ // Note: linkCode(ownerUID, effort) is now invoked from the sub-class constructors.
}
- ~LinkBuffer()
+ ~LinkBufferBase()
{
ASSERT(m_completed || (!m_executableMemory && m_effort == JITCompilationCanFail));
}
@@ -204,8 +208,8 @@ public:
// finalizeCodeWithoutDisassembly() directly if you have your own way of
// displaying disassembly.
- CodeRef finalizeCodeWithoutDisassembly();
- CodeRef finalizeCodeWithDisassembly(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
+ inline CodeRef finalizeCodeWithoutDisassembly();
+ inline CodeRef finalizeCodeWithDisassembly(const char* format, ...) WTF_ATTRIBUTE_PRINTF(2, 3);
CodePtr trampolineAt(Label label)
{
@@ -225,21 +229,19 @@ public:
private:
template <typename T> T applyOffset(T src)
{
-#if ENABLE(BRANCH_COMPACTION)
- src.m_offset -= m_assembler->executableOffsetFor(src.m_offset);
-#endif
- return src;
+ return ExecutableOffsetCalculator<T>::applyOffset(m_assembler, src);
}
+protected:
// Keep this private! - the underlying code should only be obtained externally via finalizeCode().
void* code()
{
return m_code;
}
- void linkCode(void* ownerUID, JITCompilationEffort);
+ inline void linkCode(void* ownerUID, JITCompilationEffort);
- void performFinalization();
+ inline void performFinalization();
#if DUMP_LINK_STATISTICS
static void dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize);
@@ -251,12 +253,10 @@ private:
RefPtr<ExecutableMemoryHandle> m_executableMemory;
size_t m_size;
-#if ENABLE(BRANCH_COMPACTION)
- size_t m_initialSize;
-#endif
void* m_code;
MacroAssembler* m_assembler;
JSGlobalData* m_globalData;
+protected:
#ifndef NDEBUG
bool m_completed;
JITCompilationEffort m_effort;
@@ -290,6 +290,234 @@ private:
#define FINALIZE_DFG_CODE(linkBufferReference, dataLogFArgumentsForHeading) \
FINALIZE_CODE_IF((Options::showDisassembly() || Options::showDFGDisassembly()), linkBufferReference, dataLogFArgumentsForHeading)
+
+template <typename MacroAssembler, template <typename T> class ExecutableOffsetCalculator>
+inline typename LinkBufferBase<MacroAssembler, ExecutableOffsetCalculator>::CodeRef LinkBufferBase<MacroAssembler, ExecutableOffsetCalculator>::finalizeCodeWithoutDisassembly()
+{
+ performFinalization();
+
+ return CodeRef(m_executableMemory);
+}
+
+template <typename MacroAssembler, template <typename T> class ExecutableOffsetCalculator>
+inline typename LinkBufferBase<MacroAssembler, ExecutableOffsetCalculator>::CodeRef LinkBufferBase<MacroAssembler, ExecutableOffsetCalculator>::finalizeCodeWithDisassembly(const char* format, ...)
+{
+ ASSERT(Options::showDisassembly() || Options::showDFGDisassembly());
+
+ CodeRef result = finalizeCodeWithoutDisassembly();
+
+ dataLogF("Generated JIT code for ");
+ va_list argList;
+ va_start(argList, format);
+ WTF::dataLogFV(format, argList);
+ va_end(argList);
+ dataLogF(":\n");
+
+ dataLogF(
+#if OS(WINDOWS)
+ " Code at [0x%p, 0x%p):\n",
+#else
+ " Code at [%p, %p):\n",
+#endif
+ result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size());
+ disassemble(result.code(), m_size, " ", WTF::dataFile());
+
+ return result;
+}
+
+template <typename MacroAssembler, template <typename T> class ExecutableOffsetCalculator>
+inline void LinkBufferBase<MacroAssembler, ExecutableOffsetCalculator>::linkCode(void* ownerUID, JITCompilationEffort effort)
+{
+ UNUSED_PARAM(ownerUID)
+ UNUSED_PARAM(effort)
+ ASSERT(!m_code);
+ m_executableMemory = m_assembler->m_assembler.executableCopy(*m_globalData, ownerUID, effort);
+ if (!m_executableMemory)
+ return;
+ m_code = m_executableMemory->start();
+ m_size = m_assembler->m_assembler.codeSize();
+ ASSERT(m_code);
+}
+
+template <typename MacroAssembler, template <typename T> class ExecutableOffsetCalculator>
+inline void LinkBufferBase<MacroAssembler, ExecutableOffsetCalculator>::performFinalization()
+{
+ // NOTE: BranchCompactingLinkBuffer provides its own performFinalization() for branch-compacting targets (ARMv7/ARM64)
+#ifndef NDEBUG
+ ASSERT(!m_completed);
+ ASSERT(isValid());
+ m_completed = true;
+#endif
+
+ ASSERT(m_size <= INT_MAX);
+ ExecutableAllocator::makeExecutable(code(), static_cast<int>(m_size));
+ MacroAssembler::cacheFlush(code(), m_size);
+}
+
+template <typename MacroAssembler>
+class LinkBuffer : public LinkBufferBase<MacroAssembler, DefaultExecutableOffsetCalculator>
+{
+public:
+ LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+ : LinkBufferBase<MacroAssembler, DefaultExecutableOffsetCalculator>(globalData, masm, effort)
+ {
+ this->linkCode(ownerUID, effort);
+ }
+};
+
+#if CPU(ARM_THUMB2) || CPU(ARM64) || defined(V4_BOOTSTRAP)
+
+template <typename T>
+struct BranchCompactingExecutableOffsetCalculator {
+ template <typename Assembler>
+ static T applyOffset(Assembler *as, T src) {
+ src.m_offset -= as->executableOffsetFor(src.m_offset);
+ return src;
+ }
+};
+
+template <typename MacroAssembler>
+class BranchCompactingLinkBuffer : public LinkBufferBase<MacroAssembler, BranchCompactingExecutableOffsetCalculator>
+{
+public:
+ BranchCompactingLinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+ : LinkBufferBase<MacroAssembler, BranchCompactingExecutableOffsetCalculator>(globalData, masm, effort)
+ {
+ linkCode(ownerUID, effort);
+ }
+
+ inline void performFinalization();
+
+ inline void linkCode(void* ownerUID, JITCompilationEffort);
+
+private:
+ using Base = LinkBufferBase<MacroAssembler, BranchCompactingExecutableOffsetCalculator>;
+#ifndef NDEBUG
+ using Base::m_completed;
+#endif
+ using Base::isValid;
+ using Base::code;
+ using Base::m_code;
+ using Base::m_size;
+ using Base::m_assembler;
+ using Base::m_executableMemory;
+ using Base::m_globalData;
+
+ using LinkRecord = typename MacroAssembler::LinkRecord;
+ using JumpLinkType = typename MacroAssembler::JumpLinkType;
+
+ size_t m_initialSize = 0;
+};
+
+template <typename MacroAssembler>
+inline void BranchCompactingLinkBuffer<MacroAssembler>::performFinalization()
+{
+#ifndef NDEBUG
+ ASSERT(!m_completed);
+ ASSERT(isValid());
+ this->m_completed = true;
+#endif
+
+ ExecutableAllocator::makeExecutable(code(), m_initialSize);
+ MacroAssembler::cacheFlush(code(), m_size);
+}
+
+template <typename MacroAssembler>
+inline void BranchCompactingLinkBuffer<MacroAssembler>::linkCode(void* ownerUID, JITCompilationEffort effort)
+{
+ UNUSED_PARAM(ownerUID)
+ UNUSED_PARAM(effort)
+ ASSERT(!m_code);
+ m_initialSize = m_assembler->m_assembler.codeSize();
+ m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, m_initialSize, ownerUID, effort);
+ if (!m_executableMemory)
+ return;
+ m_code = (uint8_t*)m_executableMemory->start();
+ ASSERT(m_code);
+ ExecutableAllocator::makeWritable(m_code, m_initialSize);
+ uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
+ uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
+ int readPtr = 0;
+ int writePtr = 0;
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = m_assembler->jumpsToLink();
+ unsigned jumpCount = unsigned(jumpsToLink.size());
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ int offset = readPtr - writePtr;
+ ASSERT(!(offset & 1));
+
+ // Copy the instructions from the last jump to the current one.
+ unsigned regionSize = unsigned(jumpsToLink[i].from() - readPtr);
+ uint16_t* copySource = reinterpret_cast_ptr<uint16_t*>(inData + readPtr);
+ uint16_t* copyEnd = reinterpret_cast_ptr<uint16_t*>(inData + readPtr + regionSize);
+ uint16_t* copyDst = reinterpret_cast_ptr<uint16_t*>(outData + writePtr);
+ ASSERT(!(regionSize % 2));
+ ASSERT(!(readPtr % 2));
+ ASSERT(!(writePtr % 2));
+ while (copySource != copyEnd)
+ *copyDst++ = *copySource++;
+ m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
+ readPtr += regionSize;
+ writePtr += regionSize;
+
+ // Calculate absolute address of the jump target, in the case of backwards
+ // branches we need to be precise, forward branches we are pessimistic
+ const uint8_t* target;
+ if (jumpsToLink[i].to() >= jumpsToLink[i].from())
+ target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
+ else
+ target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+
+ JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target);
+ // Compact branch if we can...
+ if (m_assembler->canCompact(jumpsToLink[i].type())) {
+ // Step back in the write stream
+ int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
+ if (delta) {
+ writePtr -= delta;
+ m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+ }
+ }
+ jumpsToLink[i].setFrom(writePtr);
+ }
+ // Copy everything after the last jump
+ memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
+ m_assembler->recordLinkOffsets(readPtr, unsigned(m_initialSize), readPtr - writePtr);
+
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ uint8_t* location = outData + jumpsToLink[i].from();
+ uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+ m_assembler->link(jumpsToLink[i], location, target);
+ }
+
+ jumpsToLink.clear();
+ m_size = writePtr + m_initialSize - readPtr;
+ m_executableMemory->shrink(m_size);
+}
+
+#if CPU(ARM_THUMB2) || defined(V4_BOOTSTRAP)
+template <>
+class LinkBuffer<JSC::MacroAssembler<MacroAssemblerARMv7>> : public BranchCompactingLinkBuffer<JSC::MacroAssembler<MacroAssemblerARMv7>>
+{
+public:
+ LinkBuffer(JSGlobalData& globalData, JSC::MacroAssembler<MacroAssemblerARMv7>* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+ : BranchCompactingLinkBuffer<JSC::MacroAssembler<MacroAssemblerARMv7>>(globalData, masm, ownerUID, effort)
+ {}
+};
+#endif
+
+#if CPU(ARM64)
+template <>
+class LinkBuffer<JSC::MacroAssembler<MacroAssemblerARM64>> : public BranchCompactingLinkBuffer<JSC::MacroAssembler<MacroAssemblerARM64>>
+{
+public:
+ LinkBuffer(JSGlobalData& globalData, JSC::MacroAssembler<MacroAssemblerARM64>* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
+ : BranchCompactingLinkBuffer<JSC::MacroAssembler<JSC::MacroAssemblerARM64>>(globalData, masm, ownerUID, effort)
+ {}
+};
+#endif
+
+#endif
+
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
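The LinkBuffer rework above factors the old #if ENABLE(BRANCH_COMPACTION) branches into types: LinkBufferBase takes the offset adjustment as a template template parameter, LinkBuffer uses the identity calculator, and BranchCompactingLinkBuffer (selected for ARMv7/ARM64 and the bootstrap build) substitutes the compacting one. A simplified, hypothetical sketch of that policy split follows; every name in it is invented for illustration.

// Hypothetical sketch: a link-buffer base parameterized on an offset-calculator policy.
#include <cstdio>

struct Offset { int m_offset; };

template <typename T>
struct DefaultOffsetCalculator {
    template <typename Assembler>
    static T applyOffset(Assembler*, T src) { return src; }          // identity: no compaction
};

template <typename T>
struct CompactingOffsetCalculator {
    template <typename Assembler>
    static T applyOffset(Assembler* as, T src)
    {
        src.m_offset -= as->executableOffsetFor(src.m_offset);       // subtract compaction delta
        return src;
    }
};

struct FakeAssembler {
    int executableOffsetFor(int) const { return 2; }                 // pretend 2 bytes were compacted away
};

template <typename Assembler, template <typename> class OffsetCalculator>
class LinkBufferBase {
public:
    explicit LinkBufferBase(Assembler* masm) : m_assembler(masm) {}
    template <typename T>
    T applyOffset(T src) { return OffsetCalculator<T>::applyOffset(m_assembler, src); }
protected:
    Assembler* m_assembler;
};

template <typename Assembler>
class PlainLinkBuffer : public LinkBufferBase<Assembler, DefaultOffsetCalculator> {
public:
    using LinkBufferBase<Assembler, DefaultOffsetCalculator>::LinkBufferBase;
};

template <typename Assembler>
class CompactingLinkBuffer : public LinkBufferBase<Assembler, CompactingOffsetCalculator> {
public:
    using LinkBufferBase<Assembler, CompactingOffsetCalculator>::LinkBufferBase;
};

int main()
{
    FakeAssembler masm;
    PlainLinkBuffer<FakeAssembler> plain(&masm);
    CompactingLinkBuffer<FakeAssembler> compacting(&masm);
    std::printf("plain: %d, compacted: %d\n",
                plain.applyOffset(Offset{10}).m_offset,          // 10
                compacting.applyOffset(Offset{10}).m_offset);    // 8
    return 0;
}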
diff --git a/src/3rdparty/masm/assembler/MacroAssembler.h b/src/3rdparty/masm/assembler/MacroAssembler.h
index e122e2f3ae..87794c8ef4 100644
--- a/src/3rdparty/masm/assembler/MacroAssembler.h
+++ b/src/3rdparty/masm/assembler/MacroAssembler.h
@@ -30,8 +30,9 @@
#if ENABLE(ASSEMBLER)
-#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
+
+#if CPU(ARM_THUMB2)
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
#elif CPU(ARM64)
@@ -68,13 +69,53 @@ typedef MacroAssemblerSH4 MacroAssemblerBase;
namespace JSC {
+template <typename MacroAssemblerBase>
class MacroAssembler : public MacroAssemblerBase {
public:
+ using DoubleCondition = typename MacroAssemblerBase::DoubleCondition;
+ using ResultCondition = typename MacroAssemblerBase::ResultCondition;
+ using RelationalCondition = typename MacroAssemblerBase::RelationalCondition;
+ using RegisterID = typename MacroAssemblerBase::RegisterID;
+ using Address = typename MacroAssemblerBase::Address;
+ using ExtendedAddress = typename MacroAssemblerBase::ExtendedAddress;
+ using BaseIndex = typename MacroAssemblerBase::BaseIndex;
+ using ImplicitAddress = typename MacroAssemblerBase::ImplicitAddress;
+ using AbsoluteAddress = typename MacroAssemblerBase::AbsoluteAddress;
+ using TrustedImm32 = typename MacroAssemblerBase::TrustedImm32;
+ using TrustedImm64 = typename MacroAssemblerBase::TrustedImm64;
+ using TrustedImmPtr = typename MacroAssemblerBase::TrustedImmPtr;
+ using Imm32 = typename MacroAssemblerBase::Imm32;
+ using Imm64 = typename MacroAssemblerBase::Imm64;
+ using ImmPtr = typename MacroAssemblerBase::ImmPtr;
+ using Label = typename MacroAssemblerBase::Label;
+ using DataLabelPtr = typename MacroAssemblerBase::DataLabelPtr;
+ using DataLabel32 = typename MacroAssemblerBase::DataLabel32;
+ using DataLabelCompact = typename MacroAssemblerBase::DataLabelCompact;
+ using Jump = typename MacroAssemblerBase::Jump;
+ using PatchableJump = typename MacroAssemblerBase::PatchableJump;
+
using MacroAssemblerBase::pop;
using MacroAssemblerBase::jump;
using MacroAssemblerBase::branch32;
using MacroAssemblerBase::move;
+ using MacroAssemblerBase::store32;
+ using MacroAssemblerBase::add32;
+ using MacroAssemblerBase::xor32;
+ using MacroAssemblerBase::sub32;
+ using MacroAssemblerBase::load32;
+
+
+#if defined(V4_BOOTSTRAP)
+ using MacroAssemblerBase::loadPtr;
+ using MacroAssemblerBase::storePtr;
+#elif CPU(X86_64) || CPU(ARM64)
+ using MacroAssemblerBase::add64;
+ using MacroAssemblerBase::sub64;
+ using MacroAssemblerBase::xor64;
+ using MacroAssemblerBase::load64;
+ using MacroAssemblerBase::store64;
+#endif
#if ENABLE(JIT_CONSTANT_BLINDING)
using MacroAssemblerBase::add32;
@@ -100,41 +141,41 @@ public:
static DoubleCondition invert(DoubleCondition cond)
{
switch (cond) {
- case DoubleEqual:
- return DoubleNotEqualOrUnordered;
- case DoubleNotEqual:
- return DoubleEqualOrUnordered;
- case DoubleGreaterThan:
- return DoubleLessThanOrEqualOrUnordered;
- case DoubleGreaterThanOrEqual:
- return DoubleLessThanOrUnordered;
- case DoubleLessThan:
- return DoubleGreaterThanOrEqualOrUnordered;
- case DoubleLessThanOrEqual:
- return DoubleGreaterThanOrUnordered;
- case DoubleEqualOrUnordered:
- return DoubleNotEqual;
- case DoubleNotEqualOrUnordered:
- return DoubleEqual;
- case DoubleGreaterThanOrUnordered:
- return DoubleLessThanOrEqual;
- case DoubleGreaterThanOrEqualOrUnordered:
- return DoubleLessThan;
- case DoubleLessThanOrUnordered:
- return DoubleGreaterThanOrEqual;
- case DoubleLessThanOrEqualOrUnordered:
- return DoubleGreaterThan;
+ case DoubleCondition::DoubleEqual:
+ return DoubleCondition::DoubleNotEqualOrUnordered;
+ case DoubleCondition::DoubleNotEqual:
+ return DoubleCondition::DoubleEqualOrUnordered;
+ case DoubleCondition::DoubleGreaterThan:
+ return DoubleCondition::DoubleLessThanOrEqualOrUnordered;
+ case DoubleCondition::DoubleGreaterThanOrEqual:
+ return DoubleCondition::DoubleLessThanOrUnordered;
+ case DoubleCondition::DoubleLessThan:
+ return DoubleCondition::DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleCondition::DoubleLessThanOrEqual:
+ return DoubleCondition::DoubleGreaterThanOrUnordered;
+ case DoubleCondition::DoubleEqualOrUnordered:
+ return DoubleCondition::DoubleNotEqual;
+ case DoubleCondition::DoubleNotEqualOrUnordered:
+ return DoubleCondition::DoubleEqual;
+ case DoubleCondition::DoubleGreaterThanOrUnordered:
+ return DoubleCondition::DoubleLessThanOrEqual;
+ case DoubleCondition::DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleCondition::DoubleLessThan;
+ case DoubleCondition::DoubleLessThanOrUnordered:
+ return DoubleCondition::DoubleGreaterThanOrEqual;
+ case DoubleCondition::DoubleLessThanOrEqualOrUnordered:
+ return DoubleCondition::DoubleGreaterThan;
default:
RELEASE_ASSERT_NOT_REACHED();
- return DoubleEqual; // make compiler happy
+ return DoubleCondition::DoubleEqual; // make compiler happy
}
}
static bool isInvertible(ResultCondition cond)
{
switch (cond) {
- case Zero:
- case NonZero:
+ case ResultCondition::Zero:
+ case ResultCondition::NonZero:
return true;
default:
return false;
@@ -144,13 +185,13 @@ public:
static ResultCondition invert(ResultCondition cond)
{
switch (cond) {
- case Zero:
- return NonZero;
- case NonZero:
- return Zero;
+ case ResultCondition::Zero:
+ return ResultCondition::NonZero;
+ case ResultCondition::NonZero:
+ return ResultCondition::Zero;
default:
RELEASE_ASSERT_NOT_REACHED();
- return Zero; // Make compiler happy for release builds.
+ return ResultCondition::Zero; // Make compiler happy for release builds.
}
}
#endif
@@ -159,17 +200,17 @@ public:
// described in terms of other macro assembly methods.
void pop()
{
- addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
+ addPtr(TrustedImm32(sizeof(void*)), MacroAssemblerBase::stackPointerRegister);
}
void peek(RegisterID dest, int index = 0)
{
- loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ loadPtr(Address(MacroAssemblerBase::stackPointerRegister, (index * sizeof(void*))), dest);
}
Address addressForPoke(int index)
{
- return Address(stackPointerRegister, (index * sizeof(void*)));
+ return Address(MacroAssemblerBase::stackPointerRegister, (index * sizeof(void*)));
}
void poke(RegisterID src, int index = 0)
@@ -187,10 +228,10 @@ public:
storePtr(imm, addressForPoke(index));
}
-#if CPU(X86_64) || CPU(ARM64)
+#if (CPU(X86_64) || CPU(ARM64)) && !defined(V4_BOOTSTRAP)
void peek64(RegisterID dest, int index = 0)
{
- load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ load64(Address(MacroAssemblerBase::stackPointerRegister, (index * sizeof(void*))), dest);
}
void poke(TrustedImm64 value, int index = 0)
@@ -296,36 +337,37 @@ public:
static RelationalCondition commute(RelationalCondition condition)
{
switch (condition) {
- case Above:
- return Below;
- case AboveOrEqual:
- return BelowOrEqual;
- case Below:
- return Above;
- case BelowOrEqual:
- return AboveOrEqual;
- case GreaterThan:
- return LessThan;
- case GreaterThanOrEqual:
- return LessThanOrEqual;
- case LessThan:
- return GreaterThan;
- case LessThanOrEqual:
- return GreaterThanOrEqual;
+ case RelationalCondition::Above:
+ return RelationalCondition::Below;
+ case RelationalCondition::AboveOrEqual:
+ return RelationalCondition::BelowOrEqual;
+ case RelationalCondition::Below:
+ return RelationalCondition::Above;
+ case RelationalCondition::BelowOrEqual:
+ return RelationalCondition::AboveOrEqual;
+ case RelationalCondition::GreaterThan:
+ return RelationalCondition::LessThan;
+ case RelationalCondition::GreaterThanOrEqual:
+ return RelationalCondition::LessThanOrEqual;
+ case RelationalCondition::LessThan:
+ return RelationalCondition::GreaterThan;
+ case RelationalCondition::LessThanOrEqual:
+ return RelationalCondition::GreaterThanOrEqual;
default:
break;
}
- ASSERT(condition == Equal || condition == NotEqual);
+ ASSERT(condition == RelationalCondition::Equal || condition == RelationalCondition::NotEqual);
return condition;
}
static const unsigned BlindingModulus = 64;
bool shouldConsiderBlinding()
{
- return !(random() & (BlindingModulus - 1));
+ return !(this->random() & (BlindingModulus - 1));
}
+#if !defined(V4_BOOTSTRAP)
// Ptr methods
// On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
// FIXME: should this use a test for 32-bitness instead of this specific exception?
@@ -850,6 +892,7 @@ public:
{
return branchSub64(cond, src1, src2, dest);
}
+#endif // !defined(V4_BOOTSTRAP)
#if ENABLE(JIT_CONSTANT_BLINDING)
using MacroAssemblerBase::and64;
@@ -1447,6 +1490,22 @@ public:
#endif
};
+#if CPU(ARM_THUMB2)
+typedef MacroAssembler<MacroAssemblerARMv7> DefaultMacroAssembler;
+#elif CPU(ARM64)
+typedef MacroAssembler<MacroAssemblerARM64> DefaultMacroAssembler;
+#elif CPU(ARM_TRADITIONAL)
+typedef MacroAssembler<MacroAssemblerARM> DefaultMacroAssembler;
+#elif CPU(MIPS)
+typedef MacroAssembler<MacroAssemblerMIPS> DefaultMacroAssembler;
+#elif CPU(X86)
+typedef MacroAssembler<MacroAssemblerX86> DefaultMacroAssembler;
+#elif CPU(X86_64)
+typedef MacroAssembler<MacroAssemblerX86_64> DefaultMacroAssembler;
+#elif CPU(SH4)
+typedef JSC::MacroAssemblerSH4 DefaultMacroAssembler;
+#endif
+
} // namespace JSC
#else // ENABLE(ASSEMBLER)
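Because MacroAssembler is now a class template over MacroAssemblerBase, names inherited from that base are dependent and are no longer found by plain unqualified lookup; that is why the hunks above switch to MacroAssemblerBase::stackPointerRegister, this->random() and fully scoped condition values. A minimal, hypothetical sketch of that two-phase-lookup rule follows; FakeBase and Masm are invented for illustration.

// Hypothetical sketch: members of a dependent base class must be qualified.
#include <cstdio>

struct FakeBase {
    static const int stackPointerRegister = 13;
    int random() { return 4; }                   // deterministic stand-in
    enum ResultCondition { Zero, NonZero };
};

template <typename Base>
struct Masm : Base {
    using ResultCondition = typename Base::ResultCondition;

    int poke()
    {
        // Writing 'stackPointerRegister + random()' here would not compile:
        // the base class is dependent, so both names must be qualified.
        return Base::stackPointerRegister + this->random();
    }

    static ResultCondition invert(ResultCondition cond)
    {
        return cond == ResultCondition::Zero ? ResultCondition::NonZero
                                             : ResultCondition::Zero;
    }
};

int main()
{
    Masm<FakeBase> masm;
    std::printf("%d %d\n", masm.poke(),
                int(Masm<FakeBase>::invert(Masm<FakeBase>::ResultCondition::Zero)));
    return 0;
}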
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM.h b/src/3rdparty/masm/assembler/MacroAssemblerARM.h
index 01e34c97cd..268fe5fe73 100644
--- a/src/3rdparty/masm/assembler/MacroAssemblerARM.h
+++ b/src/3rdparty/masm/assembler/MacroAssemblerARM.h
@@ -1349,7 +1349,7 @@ protected:
}
private:
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
friend class RepatchBuffer;
void internalCompare32(RegisterID left, TrustedImm32 right)
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h
index bd85b6b2c1..3e425a0246 100644
--- a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h
+++ b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h
@@ -3353,7 +3353,7 @@ private:
return makeBranch(cond);
}
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); }
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h b/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
index 0938383513..806f2e13b6 100644
--- a/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
+++ b/src/3rdparty/masm/assembler/MacroAssemblerARMv7.h
@@ -27,7 +27,7 @@
#ifndef MacroAssemblerARMv7_h
#define MacroAssemblerARMv7_h
-#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+#if ENABLE(ASSEMBLER) && (CPU(ARM_THUMB2) || defined(V4_BOOTSTRAP))
#include "ARMv7Assembler.h"
#include "AbstractMacroAssembler.h"
@@ -160,12 +160,41 @@ public:
{
add32(imm, dest, dest);
}
+
+#if defined(V4_BOOTSTRAP)
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, address);
+ }
+#endif
+#if !defined(V4_BOOTSTRAP)
void add32(AbsoluteAddress src, RegisterID dest)
{
load32(src.m_ptr, dataTempRegister);
add32(dataTempRegister, dest);
}
+#endif
void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
@@ -206,6 +235,7 @@ public:
add32(dataTempRegister, dest);
}
+#if !defined(V4_BOOTSTRAP)
void add32(TrustedImm32 imm, AbsoluteAddress address)
{
load32(address.m_ptr, dataTempRegister);
@@ -242,6 +272,7 @@ public:
m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
}
+#endif
void and32(RegisterID op1, RegisterID op2, RegisterID dest)
{
@@ -343,6 +374,7 @@ public:
or32(dataTempRegister, dest);
}
+#if !defined(V4_BOOTSTRAP)
void or32(RegisterID src, AbsoluteAddress dest)
{
move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
@@ -350,6 +382,7 @@ public:
or32(src, dataTempRegister);
store32(dataTempRegister, addressTempRegister);
}
+#endif
void or32(TrustedImm32 imm, RegisterID dest)
{
@@ -461,6 +494,7 @@ public:
sub32(dataTempRegister, dest);
}
+#if !defined(V4_BOOTSTRAP)
void sub32(TrustedImm32 imm, AbsoluteAddress address)
{
load32(address.m_ptr, dataTempRegister);
@@ -477,6 +511,7 @@ public:
store32(dataTempRegister, address.m_ptr);
}
+#endif
void xor32(Address src, RegisterID dest)
{
@@ -526,7 +561,8 @@ public:
// operand objects to loads and store will be implicitly constructed if a
// register is passed.
-private:
+ // internal function, but public because of "using load32;" in template sub-classes to pull
+ // in the other public overloads.
void load32(ArmAddress address, RegisterID dest)
{
if (address.type == ArmAddress::HasIndex)
@@ -541,6 +577,7 @@ private:
}
}
+private:
void load16(ArmAddress address, RegisterID dest)
{
if (address.type == ArmAddress::HasIndex)
@@ -646,11 +683,13 @@ public:
load16(setupArmAddress(address), dest);
}
+#if !defined(V4_BOOTSTRAP)
void load32(const void* address, RegisterID dest)
{
move(TrustedImmPtr(address), addressTempRegister);
m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
}
+#endif
ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
{
@@ -755,6 +794,7 @@ public:
store32(dataTempRegister, setupArmAddress(address));
}
+#if !defined(V4_BOOTSTRAP)
void store32(RegisterID src, const void* address)
{
move(TrustedImmPtr(address), addressTempRegister);
@@ -766,12 +806,14 @@ public:
move(imm, dataTempRegister);
store32(dataTempRegister, address);
}
+#endif
void store8(RegisterID src, BaseIndex address)
{
store8(src, setupArmAddress(address));
}
+#if !defined(V4_BOOTSTRAP)
void store8(RegisterID src, void* address)
{
move(TrustedImmPtr(address), addressTempRegister);
@@ -783,6 +825,7 @@ public:
move(imm, dataTempRegister);
store8(dataTempRegister, address);
}
+#endif
void store16(RegisterID src, BaseIndex address)
{
@@ -880,11 +923,13 @@ public:
m_assembler.vmov(dest, src);
}
+#if !defined(V4_BOOTSTRAP)
void loadDouble(const void* address, FPRegisterID dest)
{
move(TrustedImmPtr(address), addressTempRegister);
m_assembler.vldr(dest, addressTempRegister, 0);
}
+#endif
void storeDouble(FPRegisterID src, ImplicitAddress address)
{
@@ -916,11 +961,13 @@ public:
m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
}
+#if !defined(V4_BOOTSTRAP)
void storeDouble(FPRegisterID src, const void* address)
{
move(TrustedImmPtr(address), addressTempRegister);
storeDouble(src, addressTempRegister);
}
+#endif
void storeDouble(FPRegisterID src, BaseIndex address)
{
@@ -954,11 +1001,13 @@ public:
m_assembler.vadd(dest, op1, op2);
}
+#if !defined(V4_BOOTSTRAP)
void addDouble(AbsoluteAddress address, FPRegisterID dest)
{
loadDouble(address.m_ptr, fpTempRegister);
m_assembler.vadd(dest, dest, fpTempRegister);
}
+#endif
void divDouble(FPRegisterID src, FPRegisterID dest)
{
@@ -1037,6 +1086,7 @@ public:
m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
}
+#if !defined(V4_BOOTSTRAP)
void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
{
// Fixme: load directly into the fpr!
@@ -1044,6 +1094,7 @@ public:
m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
}
+#endif
void convertUInt32ToDouble(RegisterID src, FPRegisterID dest, RegisterID /*scratch*/)
{
@@ -1197,7 +1248,7 @@ public:
void push(RegisterID src)
{
// store preindexed with writeback
- m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
+ m_assembler.str(src, ARMRegisters::sp, -4 /*sizeof(void*)*/, true, true);
}
void push(Address address)
@@ -1239,10 +1290,12 @@ public:
m_assembler.mov(dest, src);
}
+#if !defined(V4_BOOTSTRAP)
void move(TrustedImmPtr imm, RegisterID dest)
{
move(TrustedImm32(imm), dest);
}
+#endif
void swap(RegisterID reg1, RegisterID reg2)
{
@@ -1383,6 +1436,7 @@ public:
return branch32(cond, addressTempRegister, right);
}
+#if !defined(V4_BOOTSTRAP)
Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
{
load32(left.m_ptr, dataTempRegister);
@@ -1395,6 +1449,7 @@ public:
load32(left.m_ptr, addressTempRegister);
return branch32(cond, addressTempRegister, right);
}
+#endif
Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
{
@@ -1451,6 +1506,7 @@ public:
return branchTest32(cond, addressTempRegister, mask);
}
+#if !defined(V4_BOOTSTRAP)
Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
{
// use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
@@ -1458,6 +1514,7 @@ public:
load8(Address(addressTempRegister), addressTempRegister);
return branchTest32(cond, addressTempRegister, mask);
}
+#endif
void jump(RegisterID target)
{
@@ -1471,12 +1528,14 @@ public:
m_assembler.bx(dataTempRegister);
}
+#if !defined(V4_BOOTSTRAP)
void jump(AbsoluteAddress address)
{
move(TrustedImmPtr(address.m_ptr), dataTempRegister);
load32(Address(dataTempRegister), dataTempRegister);
m_assembler.bx(dataTempRegister);
}
+#endif
// Arithmetic control flow operations:
@@ -1517,6 +1576,7 @@ public:
return branchAdd32(cond, dest, imm, dest);
}
+#if !defined(V4_BOOTSTRAP)
Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
{
// Move the high bits of the address into addressTempRegister,
@@ -1542,6 +1602,7 @@ public:
return Jump(makeBranch(cond));
}
+#endif
Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
{
@@ -1712,6 +1773,7 @@ public:
return DataLabel32(this);
}
+#if !defined(V4_BOOTSTRAP)
ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
{
padBeforePatch();
@@ -1739,7 +1801,8 @@ public:
m_makeJumpPatchable = false;
return PatchableJump(result);
}
-
+#endif
+
PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
{
m_makeJumpPatchable = true;
@@ -1756,6 +1819,7 @@ public:
return PatchableJump(result);
}
+#if !defined(V4_BOOTSTRAP)
PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
m_makeJumpPatchable = true;
@@ -1763,6 +1827,7 @@ public:
m_makeJumpPatchable = false;
return PatchableJump(result);
}
+#endif
PatchableJump patchableJump()
{
@@ -1773,6 +1838,7 @@ public:
return PatchableJump(result);
}
+#if !defined(V4_BOOTSTRAP)
ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
{
DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
@@ -1780,7 +1846,7 @@ public:
return label;
}
ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }
-
+#endif
ALWAYS_INLINE Call tailRecursiveCall()
{
@@ -1801,6 +1867,7 @@ public:
return m_assembler.executableOffsetFor(location);
}
+#if !defined(V4_BOOTSTRAP)
static FunctionPtr readCallTarget(CodeLocationCall call)
{
return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
@@ -1813,7 +1880,8 @@ public:
const unsigned twoWordOpSize = 4;
return label.labelAtOffset(-twoWordOpSize * 2);
}
-
+#endif
+
static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
{
#if OS(LINUX) || OS(QNX)
@@ -1927,9 +1995,10 @@ protected:
}
private:
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
friend class RepatchBuffer;
+#if !defined(V4_BOOTSTRAP)
static void linkCall(void* code, Call call, FunctionPtr function)
{
ARMv7Assembler::linkCall(code, call.m_label, function.value());
@@ -1944,6 +2013,7 @@ private:
{
ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
+#endif
bool m_makeJumpPatchable;
};
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerMIPS.h b/src/3rdparty/masm/assembler/MacroAssemblerMIPS.h
index 734e779c70..68584527fc 100644
--- a/src/3rdparty/masm/assembler/MacroAssemblerMIPS.h
+++ b/src/3rdparty/masm/assembler/MacroAssemblerMIPS.h
@@ -2802,7 +2802,7 @@ private:
// Otherwise, we can emit any number of instructions.
bool m_fixedWidth;
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
friend class RepatchBuffer;
static void linkCall(void* code, Call call, FunctionPtr function)
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerSH4.h b/src/3rdparty/masm/assembler/MacroAssemblerSH4.h
index 56fb74d45b..1e5a3113bb 100644
--- a/src/3rdparty/masm/assembler/MacroAssemblerSH4.h
+++ b/src/3rdparty/masm/assembler/MacroAssemblerSH4.h
@@ -2278,7 +2278,7 @@ protected:
return static_cast<SH4Assembler::Condition>(cond);
}
private:
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
friend class RepatchBuffer;
static void linkCall(void*, Call, FunctionPtr);
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerX86.h b/src/3rdparty/masm/assembler/MacroAssemblerX86.h
index 9a33fe870e..742a4b48f7 100644
--- a/src/3rdparty/masm/assembler/MacroAssemblerX86.h
+++ b/src/3rdparty/masm/assembler/MacroAssemblerX86.h
@@ -54,6 +54,38 @@ public:
using MacroAssemblerX86Common::convertInt32ToDouble;
using MacroAssemblerX86Common::branchTest8;
+#if defined(V4_BOOTSTRAP)
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load32(address, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub32(imm, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ add32(imm, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add32(imm, src, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store32(src, address);
+ }
+
+ Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest8(cond, Address(address.base, address.offset), mask);
+ }
+#endif
+
void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
m_assembler.leal_mr(imm.m_value, src, dest);
@@ -306,7 +338,7 @@ public:
}
private:
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
friend class RepatchBuffer;
static void linkCall(void* code, Call call, FunctionPtr function)
diff --git a/src/3rdparty/masm/assembler/MacroAssemblerX86_64.h b/src/3rdparty/masm/assembler/MacroAssemblerX86_64.h
index 9e74f1c29f..3566702413 100644
--- a/src/3rdparty/masm/assembler/MacroAssemblerX86_64.h
+++ b/src/3rdparty/masm/assembler/MacroAssemblerX86_64.h
@@ -52,6 +52,33 @@ public:
using MacroAssemblerX86Common::loadDouble;
using MacroAssemblerX86Common::convertInt32ToDouble;
+#if defined(V4_BOOTSTRAP)
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub64(imm, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ add64(imm, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add64(imm, src, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store64(src, address);
+ }
+#endif
+
void add32(TrustedImm32 imm, AbsoluteAddress address)
{
move(TrustedImmPtr(address.m_ptr), scratchRegister);
@@ -634,7 +661,7 @@ public:
}
private:
- friend class LinkBuffer;
+ template <typename, template <typename> class> friend class LinkBufferBase;
friend class RepatchBuffer;
static void linkCall(void* code, Call call, FunctionPtr function)
diff --git a/src/3rdparty/masm/assembler/X86Assembler.h b/src/3rdparty/masm/assembler/X86Assembler.h
index 1875ebaff0..24462ef38f 100644
--- a/src/3rdparty/masm/assembler/X86Assembler.h
+++ b/src/3rdparty/masm/assembler/X86Assembler.h
@@ -29,6 +29,7 @@
#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
#include "AssemblerBuffer.h"
+#include "AbstractMacroAssembler.h"
#include "JITCompilationEffort.h"
#include <stdint.h>
#include <wtf/Assertions.h>
@@ -252,6 +253,45 @@ public:
{
}
+ template <typename LabelType>
+ class Jump {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class Call;
+ template <typename, template <typename> class> friend class LinkBufferBase;
+ public:
+ Jump()
+ {
+ }
+
+ Jump(AssemblerLabel jmp)
+ : m_label(jmp)
+ {
+ }
+
+ LabelType label() const
+ {
+ LabelType result;
+ result.m_label = m_label;
+ return result;
+ }
+
+ void link(AbstractMacroAssembler<X86Assembler>* masm) const
+ {
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
+ }
+
+ void linkTo(LabelType label, AbstractMacroAssembler<X86Assembler>* masm) const
+ {
+ masm->m_assembler.linkJump(m_label, label.label());
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+
+ private:
+ AssemblerLabel m_label;
+ };
+
// Stack operations:
void push_r(RegisterID reg)
diff --git a/src/3rdparty/masm/masm.pri b/src/3rdparty/masm/masm.pri
index afa1438974..c63cd5da66 100644
--- a/src/3rdparty/masm/masm.pri
+++ b/src/3rdparty/masm/masm.pri
@@ -13,6 +13,7 @@ HEADERS += $$PWD/wtf/RawPointer.h
winrt: SOURCES += $$PWD/wtf/OSAllocatorWinRT.cpp
else:win32: SOURCES += $$PWD/wtf/OSAllocatorWin.cpp
+else:integrity: SOURCES += $$PWD/wtf/OSAllocatorIntegrity.cpp
else: SOURCES += $$PWD/wtf/OSAllocatorPosix.cpp
HEADERS += $$PWD/wtf/OSAllocator.h
diff --git a/src/3rdparty/masm/stubs/ExecutableAllocator.h b/src/3rdparty/masm/stubs/ExecutableAllocator.h
index 8617229b06..9a2a9773b5 100644
--- a/src/3rdparty/masm/stubs/ExecutableAllocator.h
+++ b/src/3rdparty/masm/stubs/ExecutableAllocator.h
@@ -61,7 +61,7 @@ namespace JSC {
class JSGlobalData;
struct ExecutableMemoryHandle : public RefCounted<ExecutableMemoryHandle> {
- ExecutableMemoryHandle(QV4::ExecutableAllocator *allocator, int size)
+ ExecutableMemoryHandle(QV4::ExecutableAllocator *allocator, size_t size)
: m_allocator(allocator)
, m_size(size)
{
@@ -79,14 +79,14 @@ struct ExecutableMemoryHandle : public RefCounted<ExecutableMemoryHandle> {
inline bool isManaged() const { return true; }
void* start() { return m_allocation->start(); }
- int sizeInBytes() { return m_size; }
+ size_t sizeInBytes() { return m_size; }
QV4::ExecutableAllocator::ChunkOfPages *chunk() const
{ return m_allocator->chunkForAllocation(m_allocation); }
QV4::ExecutableAllocator *m_allocator;
QV4::ExecutableAllocator::Allocation *m_allocation;
- int m_size;
+ size_t m_size;
};
struct ExecutableAllocator {
@@ -94,7 +94,7 @@ struct ExecutableAllocator {
: realAllocator(alloc)
{}
- PassRefPtr<ExecutableMemoryHandle> allocate(JSGlobalData&, int size, void*, int)
+ PassRefPtr<ExecutableMemoryHandle> allocate(JSGlobalData&, size_t size, void*, int)
{
return adoptRef(new ExecutableMemoryHandle(realAllocator, size));
}
diff --git a/src/3rdparty/masm/wtf/OSAllocatorIntegrity.cpp b/src/3rdparty/masm/wtf/OSAllocatorIntegrity.cpp
new file mode 100644
index 0000000000..451ca147d1
--- /dev/null
+++ b/src/3rdparty/masm/wtf/OSAllocatorIntegrity.cpp
@@ -0,0 +1,232 @@
+/****************************************************************************
+**
+** Copyright (C) Rolland Dudemaine All rights reserved.
+** Copyright (C) 2016 The Qt Company Ltd.
+** Contact: https://www.qt.io/licensing/
+**
+** This file is part of the QtQml module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and The Qt Company. For licensing terms
+** and conditions see https://www.qt.io/terms-conditions. For further
+** information use the contact form at https://www.qt.io/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 3 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL3 included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 3 requirements
+** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 2.0 or (at your option) the GNU General
+** Public license version 3 or any later version approved by the KDE Free
+** Qt Foundation. The licenses are as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
+** included in the packaging of this file. Please review the following
+** information to ensure the GNU General Public License requirements will
+** be met: https://www.gnu.org/licenses/gpl-2.0.html and
+** https://www.gnu.org/licenses/gpl-3.0.html.
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "config.h"
+#include "OSAllocator.h"
+
+#include <INTEGRITY.h>
+#include <memory_region.h>
+#include <set>
+#include <wtf/Assertions.h>
+#include <wtf/UnusedParam.h>
+
+#define ASP_PAGESIZE 0x1000
+
+namespace WTF {
+struct MRPair {
+ mutable MemoryRegion pmr;
+ mutable MemoryRegion vmr;
+
+ mutable bool mapped;
+
+ Address start;
+
+ MRPair(Address _start = 0) :
+ pmr(0),
+ vmr(0),
+ mapped(false),
+ start(_start)
+ {}
+
+ bool operator<(const MRPair& rhs) const
+ {
+ return this->start < rhs.start;
+ }
+};
+
+class MRContainer
+{
+private:
+ std::set<MRPair> mrset;
+ LocalMutex iteratorGuard;
+public:
+ MRContainer() {
+ CheckSuccess(CreateLocalMutex(&iteratorGuard));
+ }
+ const MRPair* getMRPair(Address start) {
+ WaitForLocalMutex(iteratorGuard);
+ auto pairIterator = mrset.find(MRPair(start));
+ const MRPair* result = ((pairIterator == mrset.end()) ? NULL : &(*pairIterator));
+ ReleaseLocalMutex(iteratorGuard);
+ return result;
+ }
+ Error deleteMRPair(const MRPair* pair) {
+ int erased = 0;
+ WaitForLocalMutex(iteratorGuard);
+ erased = mrset.erase(*pair);
+ ReleaseLocalMutex(iteratorGuard);
+ if(erased == 1)
+ return Success;
+ else
+ return ArgumentError; /* An exception could be thrown in this case */
+ }
+ Error insertMRPair(MRPair* pair) {
+ WaitForLocalMutex(iteratorGuard);
+ auto inserted = mrset.insert(*pair);
+ ReleaseLocalMutex(iteratorGuard);
+ if(inserted.second == true)
+ return Success;
+ else
+ return Failure; /* An exception could be thrown in this case */
+ }
+ ~MRContainer() {
+ CheckSuccess(CloseLocalMutex(iteratorGuard));
+ }
+};
+
+static MRContainer memoryRegionsContainer;
+
+Error setAttributes(MemoryRegion mr, bool writable, bool executable)
+{
+ Value attributes = MEMORY_READ;
+ if(writable)
+ attributes |= MEMORY_WRITE;
+ if(executable)
+ attributes |= MEMORY_EXEC;
+ return SetMemoryRegionAttributes(mr, attributes);
+}
+
+void* OSAllocator::reserveUncommitted(size_t bytes, Usage usage, bool writable, bool executable)
+{
+ MemoryRegion VMR;
+
+ Address virtualStart, length;
+
+ CheckSuccess(AllocateAnyMemoryRegion(__ghs_VirtualMemoryRegionPool, bytes, &VMR));
+ CheckSuccess(GetMemoryRegionAddresses(VMR, &virtualStart, &length));
+ Address addressIterator = virtualStart;
+ for(int i=0; i<(bytes + ASP_PAGESIZE -1)/ASP_PAGESIZE; i++) {
+ MRPair pair;
+ CheckSuccess(SplitMemoryRegion(VMR, ASP_PAGESIZE, &pair.vmr));
+ CheckSuccess(setAttributes(pair.vmr, writable, executable));
+ pair.start = addressIterator;
+
+ memoryRegionsContainer.insertMRPair(&pair);
+ addressIterator += ASP_PAGESIZE;
+ }
+
+ CheckSuccess(CloseMemoryRegion(VMR));
+ return (void*)virtualStart;
+}
+
+void* OSAllocator::reserveAndCommit(size_t bytes, Usage usage, bool writable, bool executable, bool includesGuardPages)
+{
+ MemoryRegion VMR;
+
+ Address virtualStart, length;
+
+ CheckSuccess(AllocateAnyMemoryRegion(__ghs_VirtualMemoryRegionPool, bytes, &VMR));
+ CheckSuccess(GetMemoryRegionAddresses(VMR, &virtualStart, &length));
+
+ Address addressIterator = virtualStart;
+ for(int i=0; i<(bytes + ASP_PAGESIZE -1)/ASP_PAGESIZE; i++) {
+ MRPair pair;
+ pair.start = addressIterator;
+ CheckSuccess(SplitMemoryRegion(VMR, ASP_PAGESIZE, &pair.vmr));
+ CheckSuccess(setAttributes(pair.vmr, writable, executable));
+ /* Do not map the first and the last pages if guard pages are required */
+ if(!includesGuardPages || (i!=0 && i!= (bytes + ASP_PAGESIZE -1)/ASP_PAGESIZE -1))
+ {
+ CheckSuccess(GetPageFromAddressSpaceFreeList(GetCurrentAddressSpace(), &pair.pmr));
+ CheckSuccess(MapMemoryRegion(pair.vmr, pair.pmr));
+ pair.mapped = true;
+ }
+
+ memoryRegionsContainer.insertMRPair(&pair);
+ addressIterator += ASP_PAGESIZE;
+ }
+
+ CheckSuccess(CloseMemoryRegion(VMR));
+ return (void*)virtualStart;
+}
+
+void OSAllocator::commit(void* address, size_t bytes, bool writable, bool executable)
+{
+ for(int i=0; i<(bytes + ASP_PAGESIZE -1)/ASP_PAGESIZE; i++)
+ {
+ const MRPair* pair = memoryRegionsContainer.getMRPair((Address)address);
+ if(pair == NULL)
+ return;
+ CheckSuccess(setAttributes(pair->vmr, writable, executable));
+ CheckSuccess(GetPageFromAddressSpaceFreeList(GetCurrentAddressSpace(), &pair->pmr));
+ CheckSuccess(MapMemoryRegion(pair->vmr, pair->pmr));
+ pair->mapped = true;
+ address = (char*)address + ASP_PAGESIZE;
+ }
+}
+
+void OSAllocator::decommit(void* address, size_t bytes)
+{
+ for(int i=0; i<(bytes + ASP_PAGESIZE -1)/ASP_PAGESIZE; i++)
+ {
+ const MRPair* pair = memoryRegionsContainer.getMRPair((Address)address);
+ if(pair == NULL)
+ return;
+ if(pair->mapped == false)
+ continue;
+
+ CheckSuccess(UnmapMemoryRegion(pair->vmr));
+ CheckSuccess(PutPageOnAddressSpaceFreeList(GetCurrentAddressSpace(), pair->pmr));
+ pair->mapped = false;
+ address = (char*)address + ASP_PAGESIZE;
+ }
+}
+
+void OSAllocator::releaseDecommitted(void* address, size_t bytes)
+{
+ for(int i=0; i<(bytes + ASP_PAGESIZE -1)/ASP_PAGESIZE; i++)
+ {
+ const MRPair* pair = memoryRegionsContainer.getMRPair((Address)address);
+ if(pair == NULL)
+ return;
+ /* Check if the memory is still committed */
+ if(pair->mapped == true)
+ {
+ CheckSuccess(UnmapMemoryRegion(pair->vmr));
+ CheckSuccess(PutPageOnAddressSpaceFreeList(GetCurrentAddressSpace(), pair->pmr));
+ pair->mapped = false;
+ }
+ CheckSuccess(AddToMemoryPool(__ghs_VirtualMemoryRegionPool, pair->vmr));
+ address = (char*)address + ASP_PAGESIZE;
+
+ memoryRegionsContainer.deleteMRPair(pair);
+ }
+}
+} // namespace WTF
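
The INTEGRITY allocator above tracks every reservation at page granularity: the reserve*() functions split the virtual region into ASP_PAGESIZE-sized pieces keyed by their start address, and commit()/decommit()/releaseDecommitted() walk the requested range one page at a time. A minimal sketch of that page-walk pattern, with a plain std::map standing in for the MemoryRegion bookkeeping (the INTEGRITY kernel calls are omitted and all names here are illustrative):

#include <cstddef>
#include <cstdint>
#include <map>

static const std::size_t kPageSize = 0x1000;         // mirrors ASP_PAGESIZE

struct PageState { bool mapped = false; };            // stand-in for MRPair

static std::map<std::uintptr_t, PageState> g_pages;   // keyed by page start address

// Walk [address, address + bytes) one page at a time, as commit()/decommit() do.
static void forEachPage(void *address, std::size_t bytes, bool map)
{
    std::uintptr_t page = reinterpret_cast<std::uintptr_t>(address);
    const std::size_t pageCount = (bytes + kPageSize - 1) / kPageSize; // round up
    for (std::size_t i = 0; i < pageCount; ++i, page += kPageSize) {
        auto it = g_pages.find(page);
        if (it == g_pages.end())
            return;                 // unknown page: bail out, like the code above
        it->second.mapped = map;    // the real code maps/unmaps a MemoryRegion here
    }
}

The actual implementation additionally serializes access to the set with a LocalMutex and returns physical pages to the address-space free list when decommitting.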
diff --git a/src/3rdparty/masm/wtf/Platform.h b/src/3rdparty/masm/wtf/Platform.h
index bc62c381db..7f2023a68a 100644
--- a/src/3rdparty/masm/wtf/Platform.h
+++ b/src/3rdparty/masm/wtf/Platform.h
@@ -949,10 +949,6 @@
#define WTF_USE_ACCESSIBILITY_CONTEXT_MENUS 1
#endif
-#if CPU(ARM_THUMB2) || CPU(ARM64)
-#define ENABLE_BRANCH_COMPACTION 1
-#endif
-
#if !defined(ENABLE_THREADING_LIBDISPATCH) && HAVE(DISPATCH_H)
#define ENABLE_THREADING_LIBDISPATCH 1
#elif !defined(ENABLE_THREADING_OPENMP) && defined(_OPENMP)
diff --git a/src/3rdparty/masm/yarr/YarrJIT.cpp b/src/3rdparty/masm/yarr/YarrJIT.cpp
index d8211ec4b2..e4f2d97759 100644
--- a/src/3rdparty/masm/yarr/YarrJIT.cpp
+++ b/src/3rdparty/masm/yarr/YarrJIT.cpp
@@ -39,7 +39,7 @@ using namespace WTF;
namespace JSC { namespace Yarr {
template<YarrJITCompileMode compileMode>
-class YarrGenerator : private MacroAssembler {
+class YarrGenerator : private DefaultMacroAssembler {
friend void jitCompile(JSGlobalData*, YarrCodeBlock& jitObject, const String& pattern, unsigned& numSubpatterns, const char*& error, bool ignoreCase, bool multiline);
#if CPU(ARM)
@@ -599,7 +599,7 @@ class YarrGenerator : private MacroAssembler {
}
// Called at the end of code generation to link all return addresses.
- void linkDataLabels(LinkBuffer& linkBuffer)
+ void linkDataLabels(LinkBuffer<JSC::DefaultMacroAssembler>& linkBuffer)
{
ASSERT(isEmpty());
for (unsigned i = 0; i < m_backtrackRecords.size(); ++i)
@@ -2676,7 +2676,7 @@ public:
backtrack();
// Link & finalize the code.
- LinkBuffer linkBuffer(*globalData, this, REGEXP_CODE_ID);
+ LinkBuffer<JSC::DefaultMacroAssembler> linkBuffer(*globalData, this, REGEXP_CODE_ID);
m_backtrackingState.linkDataLabels(linkBuffer);
if (compileMode == MatchOnly) {
diff --git a/src/imports/imports.pro b/src/imports/imports.pro
index 0d84bf538f..6dab501d71 100644
--- a/src/imports/imports.pro
+++ b/src/imports/imports.pro
@@ -16,6 +16,7 @@ qtHaveModule(quick) {
layouts \
qtquick2 \
window \
+ sharedimage \
testlib
qtConfig(opengl(es1|es2)?): \
diff --git a/src/imports/sharedimage/plugin.cpp b/src/imports/sharedimage/plugin.cpp
new file mode 100644
index 0000000000..f20edc641c
--- /dev/null
+++ b/src/imports/sharedimage/plugin.cpp
@@ -0,0 +1,134 @@
+/****************************************************************************
+**
+** Copyright (C) 2017 The Qt Company Ltd.
+** Contact: https://www.qt.io/licensing/
+**
+** This file is part of the plugins of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and The Qt Company. For licensing terms
+** and conditions see https://www.qt.io/terms-conditions. For further
+** information use the contact form at https://www.qt.io/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 3 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL3 included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 3 requirements
+** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 2.0 or (at your option) the GNU General
+** Public license version 3 or any later version approved by the KDE Free
+** Qt Foundation. The licenses are as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
+** included in the packaging of this file. Please review the following
+** information to ensure the GNU General Public License requirements will
+** be met: https://www.gnu.org/licenses/gpl-2.0.html and
+** https://www.gnu.org/licenses/gpl-3.0.html.
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include <qqmlextensionplugin.h>
+#include <qqmlengine.h>
+#include <sharedimageprovider.h>
+
+
+/*!
+ \qmlmodule QtQuick.SharedImage 1
+ \title Qt Quick Shared Image Provider
+ \ingroup qmlmodules
+ \brief Adds an image provider that uses shared CPU memory.
+
+ \section2 Summary
+
+ This module provides functionality to save memory in use cases where
+ several Qt Quick applications use the same local image files. It does this
+ by placing the decoded QImage data in shared system memory, making it
+ accessible to all processes (see QSharedMemory).
+
+ This module only shares CPU memory. It does not provide sharing of GPU
+ memory or textures.
+
+ \section2 Usage
+
+ To use this module, import it like this:
+ \code
+ import QtQuick.SharedImage 1.0
+ \endcode
+
+ The sharing functionality is provided through a QQuickImageProvider. Use
+ the "image:" scheme for the URL source of the image, followed by the
+ identifier \e shared, followed by the image file path. For example:
+
+ \code
+ Image { source: "image://shared/usr/share/wallpapers/mybackground.jpg" }
+ \endcode
+
+ This will look for the file \e /usr/share/wallpapers/mybackground.jpg.
+ The first process that does this will read the image file
+ using normal Qt image loading. The decoded image data will then be placed
+ in shared memory, using the full file path as key. Later processes
+ requesting the same image will discover that the data is already available
+ in shared memory. They will then use that instead of loading the image file
+ again.
+
+ The shared image data will be kept available until the last process has deleted
+ its last reference to the shared image, at which point it is automatically released.
+
+ If system memory sharing is not available, the shared image provider falls
+ back to normal, unshared image loading.
+
+ The file path must be absolute. To use a relative path, make it absolute
+ using \e Qt.resolvedUrl() and replace the URL scheme. For example:
+
+ \code
+ ...
+ property string imagePrefix: Qt.resolvedUrl("../myimages/").replace("file://", "image://shared/")
+ Image { source: imagePrefix + "myimage.png" }
+ \endcode
+
+ The shared image module does not provide any directly usable QML types.
+*/
+
+static void initResources()
+{
+#ifdef QT_STATIC
+ Q_INIT_RESOURCE(qmake_QtQuick_SharedImage);
+#endif
+}
+
+QT_BEGIN_NAMESPACE
+
+class QtQuickSharedImagePlugin : public QQmlExtensionPlugin
+{
+ Q_OBJECT
+ Q_PLUGIN_METADATA(IID QQmlExtensionInterface_iid)
+public:
+ QtQuickSharedImagePlugin(QObject *parent = 0) : QQmlExtensionPlugin(parent) { initResources(); }
+
+ void registerTypes(const char *uri) Q_DECL_OVERRIDE
+ {
+ Q_ASSERT(uri == QStringLiteral("QtQuick.SharedImage"));
+ // Need to register *something* to let our version number be known:
+ qmlRegisterTypeNotAvailable(uri, 1, 0, "nosuchtype", QStringLiteral("Just a dummy type, do not use"));
+ }
+
+ void initializeEngine(QQmlEngine *engine, const char *uri) override
+ {
+ Q_UNUSED(uri);
+ engine->addImageProvider("shared", new SharedImageProvider);
+ }
+};
+
+QT_END_NAMESPACE
+
+#include "plugin.moc"
diff --git a/src/imports/sharedimage/qmldir b/src/imports/sharedimage/qmldir
new file mode 100644
index 0000000000..64a5aa8ac1
--- /dev/null
+++ b/src/imports/sharedimage/qmldir
@@ -0,0 +1,3 @@
+module QtQuick.SharedImage
+plugin sharedimageplugin
+classname QtQuickSharedImagePlugin
diff --git a/src/imports/sharedimage/qsharedimageloader.cpp b/src/imports/sharedimage/qsharedimageloader.cpp
new file mode 100644
index 0000000000..65cbd92bb4
--- /dev/null
+++ b/src/imports/sharedimage/qsharedimageloader.cpp
@@ -0,0 +1,265 @@
+/****************************************************************************
+**
+** Copyright (C) 2017 The Qt Company Ltd.
+** Contact: https://www.qt.io/licensing/
+**
+** This file is part of the plugins of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and The Qt Company. For licensing terms
+** and conditions see https://www.qt.io/terms-conditions. For further
+** information use the contact form at https://www.qt.io/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 3 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL3 included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 3 requirements
+** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 2.0 or (at your option) the GNU General
+** Public license version 3 or any later version approved by the KDE Free
+** Qt Foundation. The licenses are as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
+** included in the packaging of this file. Please review the following
+** information to ensure the GNU General Public License requirements will
+** be met: https://www.gnu.org/licenses/gpl-2.0.html and
+** https://www.gnu.org/licenses/gpl-3.0.html.
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qsharedimageloader_p.h"
+#include <private/qobject_p.h>
+#include <private/qimage_p.h>
+#include <QSharedMemory>
+
+
+QT_BEGIN_NAMESPACE
+
+Q_LOGGING_CATEGORY(lcSharedImage, "qt.quick.sharedimage");
+
+struct SharedImageHeader {
+ quint8 magic;
+ quint8 version;
+ quint16 offset;
+ qint32 width;
+ qint32 height;
+ qint32 bpl;
+ QImage::Format format;
+};
+Q_STATIC_ASSERT(sizeof(SharedImageHeader) % 4 == 0);
+
+#ifndef QT_NO_SHAREDMEMORY
+struct SharedImageInfo {
+ QString path;
+ QPointer<QSharedMemory> shmp;
+};
+
+void cleanupSharedImage(void *cleanupInfo)
+{
+ if (!cleanupInfo)
+ return;
+ SharedImageInfo *sii = static_cast<SharedImageInfo *>(cleanupInfo);
+ qCDebug(lcSharedImage) << "Cleanup called for" << sii->path;
+ if (sii->shmp.isNull()) {
+ qCDebug(lcSharedImage) << "shm is 0 for" << sii->path;
+ return;
+ }
+ QSharedMemory *shm = sii->shmp.data();
+ sii->shmp.clear();
+ delete shm; // destructor detaches
+ delete sii;
+}
+#else
+void cleanupSharedImage(void *) {}
+#endif
+
+class QSharedImageLoaderPrivate : public QObjectPrivate
+{
+ Q_DECLARE_PUBLIC(QSharedImageLoader)
+
+public:
+ QSharedImageLoaderPrivate()
+ : QObjectPrivate()
+ {}
+
+ QImage load(const QString &path, QSharedImageLoader::ImageParameters *params);
+
+ void storeImageToMem(void *data, const QImage &img);
+
+ bool verifyMem(const void *data, int size);
+
+ QImage createImageFromMem(const void *data, void *cleanupInfo);
+
+};
+
+
+void QSharedImageLoaderPrivate::storeImageToMem(void *data, const QImage &img)
+{
+ Q_ASSERT(data && !img.isNull());
+
+ SharedImageHeader *h = static_cast<SharedImageHeader *>(data);
+ h->magic = 'Q';
+ h->version = 1;
+ h->offset = sizeof(SharedImageHeader);
+ h->width = img.width();
+ h->height = img.height();
+ h->bpl = img.bytesPerLine();
+ h->format = img.format();
+
+ uchar *p = static_cast<uchar *>(data) + sizeof(SharedImageHeader);
+ memcpy(p, img.constBits(), img.byteCount());
+}
+
+
+bool QSharedImageLoaderPrivate::verifyMem(const void *data, int size)
+{
+ if (!data || size < int(sizeof(SharedImageHeader)))
+ return false;
+
+ const SharedImageHeader *h = static_cast<const SharedImageHeader *>(data);
+ if ((h->magic != 'Q')
+ || (h->version < 1)
+ || (h->offset < sizeof(SharedImageHeader))
+ || (h->width <= 0)
+ || (h->height <= 0)
+ || (h->bpl <= 0)
+ || (h->format <= QImage::Format_Invalid)
+ || (h->format >= QImage::NImageFormats)) {
+ return false;
+ }
+
+ int availSize = size - h->offset;
+ if (h->height * h->bpl > availSize)
+ return false;
+ if ((qt_depthForFormat(h->format) * h->width * h->height) > (8 * availSize))
+ return false;
+
+ return true;
+}
+
+
+QImage QSharedImageLoaderPrivate::createImageFromMem(const void *data, void *cleanupInfo)
+{
+ const SharedImageHeader *h = static_cast<const SharedImageHeader *>(data);
+ const uchar *p = static_cast<const uchar *>(data) + h->offset;
+
+ QImage img(p, h->width, h->height, h->bpl, h->format, cleanupSharedImage, cleanupInfo);
+ return img;
+}
+
+
+QImage QSharedImageLoaderPrivate::load(const QString &path, QSharedImageLoader::ImageParameters *params)
+{
+#ifndef QT_NO_SHAREDMEMORY
+ Q_Q(QSharedImageLoader);
+
+ QImage nil;
+ if (path.isEmpty())
+ return nil;
+
+ QScopedPointer<QSharedMemory> shm(new QSharedMemory(q->key(path, params)));
+ bool locked = false;
+
+ if (!shm->attach(QSharedMemory::ReadOnly)) {
+ QImage img = q->loadFile(path, params);
+ if (img.isNull())
+ return nil;
+ int size = sizeof(SharedImageHeader) + img.byteCount();
+ if (shm->create(size)) {
+ qCDebug(lcSharedImage) << "Created new shm segment of size" << size << "for image" << path;
+ if (!shm->lock()) {
+ qCDebug(lcSharedImage) << "Lock1 failed!?" << shm->errorString();
+ return nil;
+ }
+ locked = true;
+ storeImageToMem(shm->data(), img);
+ } else if (shm->error() == QSharedMemory::AlreadyExists) {
+ // Race handling: another process may have created the segment while
+ // we were loading the image, so just try to attach again
+ if (!shm->attach(QSharedMemory::ReadOnly)) {
+ qCDebug(lcSharedImage) << "Attach to existing failed?" << shm->errorString();
+ return nil;
+ }
+ } else {
+ qCDebug(lcSharedImage) << "Create failed?" << shm->errorString();
+ return nil;
+ }
+ }
+
+ Q_ASSERT(shm->isAttached());
+
+ if (!locked) {
+ if (!shm->lock()) {
+ qCDebug(lcSharedImage) << "Lock2 failed!?" << shm->errorString();
+ return nil;
+ }
+ locked = true;
+ }
+
+ if (!verifyMem(shm->constData(), shm->size())) {
+ qCDebug(lcSharedImage) << "Verifymem failed!?";
+ shm->unlock();
+ return nil;
+ }
+
+ QSharedMemory *shmp = shm.take();
+ SharedImageInfo *sii = new SharedImageInfo;
+ sii->path = path;
+ sii->shmp = shmp;
+ QImage shImg = createImageFromMem(shmp->constData(), sii);
+
+ if (!shmp->unlock()) {
+ qCDebug(lcSharedImage) << "UnLock failed!?";
+ }
+
+ return shImg;
+#else
+ Q_UNUSED(path);
+ Q_UNUSED(params);
+ return QImage();
+#endif
+}
+
+
+QSharedImageLoader::QSharedImageLoader(QObject *parent)
+ : QObject(*new QSharedImageLoaderPrivate, parent)
+{
+}
+
+QSharedImageLoader::~QSharedImageLoader()
+{
+}
+
+QImage QSharedImageLoader::load(const QString &path, ImageParameters *params)
+{
+ Q_D(QSharedImageLoader);
+
+ return d->load(path, params);
+}
+
+QImage QSharedImageLoader::loadFile(const QString &path, ImageParameters *params)
+{
+ Q_UNUSED(params);
+
+ return QImage(path);
+}
+
+QString QSharedImageLoader::key(const QString &path, ImageParameters *params)
+{
+ Q_UNUSED(params);
+
+ return path;
+}
+
+
+QT_END_NAMESPACE
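
QSharedImageLoaderPrivate::load() above follows an attach-or-create protocol: try to attach to an existing segment first, otherwise decode the image, create the segment, and handle the race where another process created it in the meantime. A stripped-down sketch of that pattern with QSharedMemory (illustrative only; the function name and payload are made up, and read-side locking is omitted):

#include <QByteArray>
#include <QSharedMemory>
#include <QString>
#include <cstring>

// Caller owns shm, e.g. QSharedMemory shm(QStringLiteral("some/key"));
bool publishOrAttach(QSharedMemory &shm, const QByteArray &payload)
{
    if (shm.attach(QSharedMemory::ReadOnly))
        return true;                                  // someone already published it

    if (shm.create(payload.size())) {
        shm.lock();
        std::memcpy(shm.data(), payload.constData(), size_t(payload.size()));
        shm.unlock();
        return true;
    }

    // Race: another process created the segment between our attach() and create(),
    // so just try to attach again, as the loader does.
    if (shm.error() == QSharedMemory::AlreadyExists)
        return shm.attach(QSharedMemory::ReadOnly);

    return false;
}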
diff --git a/src/imports/sharedimage/qsharedimageloader_p.h b/src/imports/sharedimage/qsharedimageloader_p.h
new file mode 100644
index 0000000000..afb50e5088
--- /dev/null
+++ b/src/imports/sharedimage/qsharedimageloader_p.h
@@ -0,0 +1,81 @@
+/****************************************************************************
+**
+** Copyright (C) 2017 The Qt Company Ltd.
+** Contact: https://www.qt.io/licensing/
+**
+** This file is part of the plugins of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and The Qt Company. For licensing terms
+** and conditions see https://www.qt.io/terms-conditions. For further
+** information use the contact form at https://www.qt.io/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 3 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL3 included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 3 requirements
+** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 2.0 or (at your option) the GNU General
+** Public license version 3 or any later version approved by the KDE Free
+** Qt Foundation. The licenses are as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
+** included in the packaging of this file. Please review the following
+** information to ensure the GNU General Public License requirements will
+** be met: https://www.gnu.org/licenses/gpl-2.0.html and
+** https://www.gnu.org/licenses/gpl-3.0.html.
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QSHAREDIMAGELOADER_H
+#define QSHAREDIMAGELOADER_H
+
+#include <QImage>
+#include <QVariant>
+#include <QLoggingCategory>
+
+QT_BEGIN_NAMESPACE
+
+Q_DECLARE_LOGGING_CATEGORY(lcSharedImage);
+
+class QSharedImageLoaderPrivate;
+
+class QSharedImageLoader : public QObject
+{
+ Q_OBJECT
+ Q_DECLARE_PRIVATE(QSharedImageLoader)
+
+public:
+ enum ImageParameter {
+ OriginalSize = 0,
+ RequestedSize,
+ NumImageParameters
+ };
+ typedef QVector<QVariant> ImageParameters;
+
+ QSharedImageLoader(QObject *parent = Q_NULLPTR);
+ ~QSharedImageLoader();
+
+ QImage load(const QString &path, ImageParameters *params = Q_NULLPTR);
+
+protected:
+ virtual QImage loadFile(const QString &path, ImageParameters *params);
+ virtual QString key(const QString &path, ImageParameters *params);
+
+private:
+ Q_DISABLE_COPY(QSharedImageLoader)
+};
+
+QT_END_NAMESPACE
+
+#endif // QSHAREDIMAGELOADER_H
diff --git a/src/imports/sharedimage/sharedimage.pro b/src/imports/sharedimage/sharedimage.pro
new file mode 100644
index 0000000000..523de66ac1
--- /dev/null
+++ b/src/imports/sharedimage/sharedimage.pro
@@ -0,0 +1,17 @@
+CXX_MODULE = qml
+TARGET = sharedimageplugin
+TARGETPATH = QtQuick/SharedImage
+IMPORT_VERSION = 1.0
+
+QT *= quick qml gui-private core-private
+
+SOURCES += \
+ plugin.cpp \
+ sharedimageprovider.cpp \
+ qsharedimageloader.cpp
+
+HEADERS += \
+ sharedimageprovider.h \
+ qsharedimageloader_p.h
+
+load(qml_plugin)
diff --git a/src/imports/sharedimage/sharedimageprovider.cpp b/src/imports/sharedimage/sharedimageprovider.cpp
new file mode 100644
index 0000000000..2dd3a130e9
--- /dev/null
+++ b/src/imports/sharedimage/sharedimageprovider.cpp
@@ -0,0 +1,156 @@
+/****************************************************************************
+**
+** Copyright (C) 2017 The Qt Company Ltd.
+** Contact: https://www.qt.io/licensing/
+**
+** This file is part of the plugins of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and The Qt Company. For licensing terms
+** and conditions see https://www.qt.io/terms-conditions. For further
+** information use the contact form at https://www.qt.io/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 3 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL3 included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 3 requirements
+** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 2.0 or (at your option) the GNU General
+** Public license version 3 or any later version approved by the KDE Free
+** Qt Foundation. The licenses are as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
+** included in the packaging of this file. Please review the following
+** information to ensure the GNU General Public License requirements will
+** be met: https://www.gnu.org/licenses/gpl-2.0.html and
+** https://www.gnu.org/licenses/gpl-3.0.html.
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include <sharedimageprovider.h>
+#include <qsharedimageloader_p.h>
+#include <qquickimageprovider.h>
+#include <private/qimage_p.h>
+#include <QImageReader>
+#include <QFileInfo>
+#include <QDir>
+
+class QuickSharedImageLoader : public QSharedImageLoader
+{
+ Q_OBJECT
+ friend class SharedImageProvider;
+
+public:
+ QuickSharedImageLoader(QObject *parent = Q_NULLPTR)
+ : QSharedImageLoader(parent)
+ {
+ }
+
+protected:
+ QImage loadFile(const QString &path, ImageParameters *params) override
+ {
+ QImageReader imgio(path);
+ QSize realSize = imgio.size();
+ QSize requestSize = params ? params->value(RequestedSize).toSize() : QSize();
+
+ // Following qquickpixmapcache's readImage, from here...
+ const bool force_scale = imgio.format() == "svg" || imgio.format() == "svgz";
+
+ if (requestSize.width() > 0 || requestSize.height() > 0) {
+ QSize s = realSize;
+ qreal ratio = 0.0;
+ if (requestSize.width() && (force_scale || requestSize.width() < s.width())) {
+ ratio = qreal(requestSize.width())/s.width();
+ }
+ if (requestSize.height() && (force_scale || requestSize.height() < s.height())) {
+ qreal hr = qreal(requestSize.height())/s.height();
+ if (ratio == 0.0 || hr < ratio)
+ ratio = hr;
+ }
+ if (ratio > 0.0) {
+ s.setHeight(qRound(s.height() * ratio));
+ s.setWidth(qRound(s.width() * ratio));
+ imgio.setScaledSize(s);
+ }
+ }
+ // ... to here
+
+ QImage image;
+ if (imgio.read(&image)) {
+ if (realSize.isEmpty())
+ realSize = image.size();
+ // Make sure we have an acceptable format for the texture uploader, or it will convert and lose sharing.
+ // This mimics the testing and conversion normally done by the Quick pixmap cache and texture factory.
+ if (image.format() != QImage::Format_RGB32 && image.format() != QImage::Format_ARGB32_Premultiplied) {
+ QImage::Format newFmt = QImage::Format_RGB32;
+ if (image.hasAlphaChannel() && image.data_ptr()->checkForAlphaPixels())
+ newFmt = QImage::Format_ARGB32_Premultiplied;
+ qCDebug(lcSharedImage) << "Convert on load from format" << image.format() << "to" << newFmt;
+ image = image.convertToFormat(newFmt);
+ }
+ }
+
+ if (params && params->count() > OriginalSize)
+ params->replace(OriginalSize, realSize);
+
+ return image;
+ }
+
+ QString key(const QString &path, ImageParameters *params) override
+ {
+ QSize reqSz = params->value(RequestedSize).toSize();
+ if (!reqSz.isValid())
+ return path;
+
+ QString key = path + QStringLiteral("_%1x%2").arg(reqSz.width()).arg(reqSz.height());
+ qCDebug(lcSharedImage) << "KEY:" << key;
+ return key;
+ }
+};
+
+
+SharedImageProvider::SharedImageProvider()
+ : QQuickImageProvider(QQuickImageProvider::Image), loader(new QuickSharedImageLoader)
+{
+}
+
+QImage SharedImageProvider::requestImage(const QString &id, QSize *size, const QSize &requestedSize)
+{
+ QFileInfo fi(QDir::root(), id);
+ QString path = fi.canonicalFilePath();
+ if (path.isEmpty())
+ return QImage();
+
+ QSharedImageLoader::ImageParameters params(QSharedImageLoader::NumImageParameters);
+ params[QSharedImageLoader::RequestedSize].setValue(requestedSize);
+
+ QImage img = loader->load(path, &params);
+ if (img.isNull()) {
+ // Possibly a sharing problem; fall back to a normal local load
+ img = loader->loadFile(path, &params);
+ if (!img.isNull())
+ qCWarning(lcSharedImage) << "Sharing problem; loading" << id << "unshared";
+ }
+
+ //... QSize realSize = params.value(QSharedImageLoader::OriginalSize).toSize();
+ // quickpixmapcache's readImage() reports the original size, prior to requestedSize scaling, through the *size
+ // parameter. Quick currently ignores that value and only cares about the actual size of the returned image,
+ // so handling and sharing of the pre-scaled size is not implemented here.
+ if (size) {
+ *size = img.size();
+ }
+
+ return img;
+}
+
+#include "sharedimageprovider.moc"
diff --git a/src/imports/sharedimage/sharedimageprovider.h b/src/imports/sharedimage/sharedimageprovider.h
new file mode 100644
index 0000000000..a2f6b6ef2f
--- /dev/null
+++ b/src/imports/sharedimage/sharedimageprovider.h
@@ -0,0 +1,58 @@
+/****************************************************************************
+**
+** Copyright (C) 2017 The Qt Company Ltd.
+** Contact: https://www.qt.io/licensing/
+**
+** This file is part of the plugins of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and The Qt Company. For licensing terms
+** and conditions see https://www.qt.io/terms-conditions. For further
+** information use the contact form at https://www.qt.io/contact-us.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 3 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL3 included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 3 requirements
+** will be met: https://www.gnu.org/licenses/lgpl-3.0.html.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 2.0 or (at your option) the GNU General
+** Public license version 3 or any later version approved by the KDE Free
+** Qt Foundation. The licenses are as published by the Free Software
+** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3
+** included in the packaging of this file. Please review the following
+** information to ensure the GNU General Public License requirements will
+** be met: https://www.gnu.org/licenses/gpl-2.0.html and
+** https://www.gnu.org/licenses/gpl-3.0.html.
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef SHAREDIMAGEPROVIDER_H
+#define SHAREDIMAGEPROVIDER_H
+
+#include <QQuickImageProvider>
+#include <QScopedPointer>
+
+class QuickSharedImageLoader;
+
+class SharedImageProvider : public QQuickImageProvider
+{
+public:
+ SharedImageProvider();
+
+ QImage requestImage(const QString &id, QSize *size, const QSize &requestedSize) override;
+
+protected:
+ QScopedPointer<QuickSharedImageLoader> loader;
+};
+#endif // SHAREDIMAGEPROVIDER_H
diff --git a/src/plugins/scenegraph/openvg/qsgopenvgglyphnode.cpp b/src/plugins/scenegraph/openvg/qsgopenvgglyphnode.cpp
index 8be2a97034..9cf4184c20 100644
--- a/src/plugins/scenegraph/openvg/qsgopenvgglyphnode.cpp
+++ b/src/plugins/scenegraph/openvg/qsgopenvgglyphnode.cpp
@@ -43,6 +43,7 @@
#include "qsgopenvghelpers.h"
#include "qsgopenvgfontglyphcache.h"
#include "qopenvgoffscreensurface.h"
+#include <cmath>
QT_BEGIN_NAMESPACE
@@ -143,7 +144,7 @@ void QSGOpenVGGlyphNode::render()
vgLoadMatrix(transform().constData());
} else {
vgLoadIdentity();
- offscreenSurface = new QOpenVGOffscreenSurface(QSize(ceil(m_bounding_rect.width()), ceil(m_bounding_rect.height())));
+ offscreenSurface = new QOpenVGOffscreenSurface(QSize(std::ceil(m_bounding_rect.width()), std::ceil(m_bounding_rect.height())));
offscreenSurface->makeCurrent();
}
diff --git a/src/plugins/scenegraph/openvg/qsgopenvginternalrectanglenode.cpp b/src/plugins/scenegraph/openvg/qsgopenvginternalrectanglenode.cpp
index be437303bc..0bd51cbf46 100644
--- a/src/plugins/scenegraph/openvg/qsgopenvginternalrectanglenode.cpp
+++ b/src/plugins/scenegraph/openvg/qsgopenvginternalrectanglenode.cpp
@@ -39,7 +39,7 @@
#include "qsgopenvginternalrectanglenode.h"
#include "qsgopenvghelpers.h"
-
+#include <cmath>
#include <VG/vgu.h>
QSGOpenVGInternalRectangleNode::QSGOpenVGInternalRectangleNode()
@@ -207,9 +207,9 @@ void QSGOpenVGInternalRectangleNode::render()
vgLoadIdentity();
if (m_radius > 0) {
// Fallback to rendering to an image for rounded rects with perspective transforms
- if (m_offscreenSurface == nullptr || m_offscreenSurface->size() != QSize(ceil(m_rect.width()), ceil(m_rect.height()))) {
+ if (m_offscreenSurface == nullptr || m_offscreenSurface->size() != QSize(std::ceil(m_rect.width()), std::ceil(m_rect.height()))) {
delete m_offscreenSurface;
- m_offscreenSurface = new QOpenVGOffscreenSurface(QSize(ceil(m_rect.width()), ceil(m_rect.height())));
+ m_offscreenSurface = new QOpenVGOffscreenSurface(QSize(std::ceil(m_rect.width()), std::ceil(m_rect.height())));
}
m_offscreenSurface->makeCurrent();
diff --git a/src/qml/compiler/compiler.pri b/src/qml/compiler/compiler.pri
index 1de5dfa6fa..fa66d3a6e3 100644
--- a/src/qml/compiler/compiler.pri
+++ b/src/qml/compiler/compiler.pri
@@ -39,7 +39,10 @@ SOURCES += \
unix: SOURCES += $$PWD/qv4compilationunitmapper_unix.cpp
else: SOURCES += $$PWD/qv4compilationunitmapper_win.cpp
-qtConfig(qml-interpreter) {
+qtConfig(private_tests): LIBS_PRIVATE += $$QMAKE_LIBS_DYNLOAD
+}
+
+qmldevtools_build|qtConfig(qml-interpreter) {
HEADERS += \
$$PWD/qv4instr_moth_p.h \
$$PWD/qv4isel_moth_p.h
@@ -48,6 +51,3 @@ qtConfig(qml-interpreter) {
$$PWD/qv4isel_moth.cpp
}
-
-qtConfig(private_tests): LIBS_PRIVATE += $$QMAKE_LIBS_DYNLOAD
-}
diff --git a/src/qml/compiler/qqmlirbuilder.cpp b/src/qml/compiler/qqmlirbuilder.cpp
index 54d0cb4f46..8a7507c92e 100644
--- a/src/qml/compiler/qqmlirbuilder.cpp
+++ b/src/qml/compiler/qqmlirbuilder.cpp
@@ -2061,4 +2061,156 @@ QQmlPropertyData *PropertyResolver::signal(const QString &name, bool *notInRevis
return 0;
}
+IRLoader::IRLoader(const QV4::CompiledData::Unit *qmlData, QmlIR::Document *output)
+ : unit(qmlData)
+ , output(output)
+{
+ pool = output->jsParserEngine.pool();
+}
+
+void IRLoader::load()
+{
+ output->jsGenerator.stringTable.clear();
+ for (uint i = 0; i < unit->stringTableSize; ++i)
+ output->jsGenerator.stringTable.registerString(unit->stringAt(i));
+
+ for (quint32 i = 0; i < unit->nImports; ++i)
+ output->imports << unit->importAt(i);
+
+ if (unit->flags & QV4::CompiledData::Unit::IsSingleton) {
+ QmlIR::Pragma *p = New<QmlIR::Pragma>();
+ p->location = QV4::CompiledData::Location();
+ p->type = QmlIR::Pragma::PragmaSingleton;
+ output->pragmas << p;
+ }
+
+ output->indexOfRootObject = unit->indexOfRootObject;
+
+ for (uint i = 0; i < unit->nObjects; ++i) {
+ const QV4::CompiledData::Object *serializedObject = unit->objectAt(i);
+ QmlIR::Object *object = loadObject(serializedObject);
+ output->objects.append(object);
+ }
+}
+
+struct FakeExpression : public QQmlJS::AST::NullExpression
+{
+ FakeExpression(int start, int length)
+ : location(start, length)
+ {}
+
+ virtual QQmlJS::AST::SourceLocation firstSourceLocation() const
+ { return location; }
+
+ virtual QQmlJS::AST::SourceLocation lastSourceLocation() const
+ { return location; }
+
+private:
+ QQmlJS::AST::SourceLocation location;
+};
+
+QmlIR::Object *IRLoader::loadObject(const QV4::CompiledData::Object *serializedObject)
+{
+ QmlIR::Object *object = pool->New<QmlIR::Object>();
+ object->init(pool, serializedObject->inheritedTypeNameIndex, serializedObject->idNameIndex);
+
+ object->indexOfDefaultPropertyOrAlias = serializedObject->indexOfDefaultPropertyOrAlias;
+ object->defaultPropertyIsAlias = serializedObject->defaultPropertyIsAlias;
+
+ object->location = serializedObject->location;
+ object->locationOfIdProperty = serializedObject->locationOfIdProperty;
+
+ QVector<int> functionIndices;
+ functionIndices.reserve(serializedObject->nFunctions + serializedObject->nBindings / 2);
+
+ for (uint i = 0; i < serializedObject->nBindings; ++i) {
+ QmlIR::Binding *b = pool->New<QmlIR::Binding>();
+ *static_cast<QV4::CompiledData::Binding*>(b) = serializedObject->bindingTable()[i];
+ object->bindings->append(b);
+ if (b->type == QV4::CompiledData::Binding::Type_Script) {
+ functionIndices.append(b->value.compiledScriptIndex);
+ b->value.compiledScriptIndex = functionIndices.count() - 1;
+
+ QmlIR::CompiledFunctionOrExpression *foe = pool->New<QmlIR::CompiledFunctionOrExpression>();
+ foe->disableAcceleratedLookups = true;
+ foe->nameIndex = 0;
+
+ QQmlJS::AST::ExpressionNode *expr;
+
+ if (b->stringIndex != quint32(0)) {
+ const int start = output->code.length();
+ const QString script = output->stringAt(b->stringIndex);
+ const int length = script.length();
+ output->code.append(script);
+ expr = new (pool) FakeExpression(start, length);
+ } else
+ expr = new (pool) QQmlJS::AST::NullExpression();
+ foe->node = new (pool) QQmlJS::AST::ExpressionStatement(expr); // dummy
+ object->functionsAndExpressions->append(foe);
+ }
+ }
+
+ Q_ASSERT(object->functionsAndExpressions->count == functionIndices.count());
+
+ for (uint i = 0; i < serializedObject->nSignals; ++i) {
+ const QV4::CompiledData::Signal *serializedSignal = serializedObject->signalAt(i);
+ QmlIR::Signal *s = pool->New<QmlIR::Signal>();
+ s->nameIndex = serializedSignal->nameIndex;
+ s->location = serializedSignal->location;
+ s->parameters = pool->New<QmlIR::PoolList<QmlIR::SignalParameter> >();
+
+ for (uint i = 0; i < serializedSignal->nParameters; ++i) {
+ QmlIR::SignalParameter *p = pool->New<QmlIR::SignalParameter>();
+ *static_cast<QV4::CompiledData::Parameter*>(p) = *serializedSignal->parameterAt(i);
+ s->parameters->append(p);
+ }
+
+ object->qmlSignals->append(s);
+ }
+
+ const QV4::CompiledData::Property *serializedProperty = serializedObject->propertyTable();
+ for (uint i = 0; i < serializedObject->nProperties; ++i, ++serializedProperty) {
+ QmlIR::Property *p = pool->New<QmlIR::Property>();
+ *static_cast<QV4::CompiledData::Property*>(p) = *serializedProperty;
+ object->properties->append(p);
+ }
+
+ QQmlJS::Engine *jsParserEngine = &output->jsParserEngine;
+
+ const QV4::CompiledData::LEUInt32 *functionIdx = serializedObject->functionOffsetTable();
+ for (uint i = 0; i < serializedObject->nFunctions; ++i, ++functionIdx) {
+ QmlIR::Function *f = pool->New<QmlIR::Function>();
+ const QV4::CompiledData::Function *compiledFunction = unit->functionAt(*functionIdx);
+
+ functionIndices.append(*functionIdx);
+ f->index = functionIndices.count() - 1;
+ f->location = compiledFunction->location;
+ f->nameIndex = compiledFunction->nameIndex;
+
+ QQmlJS::AST::FormalParameterList *paramList = 0;
+ const QV4::CompiledData::LEUInt32 *formalNameIdx = compiledFunction->formalsTable();
+ for (uint i = 0; i < compiledFunction->nFormals; ++i, ++formalNameIdx) {
+ const QString formal = unit->stringAt(*formalNameIdx);
+ QStringRef paramNameRef = jsParserEngine->newStringRef(formal);
+
+ if (paramList)
+ paramList = new (pool) QQmlJS::AST::FormalParameterList(paramList, paramNameRef);
+ else
+ paramList = new (pool) QQmlJS::AST::FormalParameterList(paramNameRef);
+ }
+
+ if (paramList)
+ paramList = paramList->finish();
+
+ const QString name = unit->stringAt(compiledFunction->nameIndex);
+ f->functionDeclaration = new(pool) QQmlJS::AST::FunctionDeclaration(jsParserEngine->newStringRef(name), paramList, /*body*/0);
+
+ object->functions->append(f);
+ }
+
+ object->runtimeFunctionIndices.allocate(pool, functionIndices);
+
+ return object;
+}
+
#endif // V4_BOOTSTRAP
diff --git a/src/qml/compiler/qqmlirbuilder_p.h b/src/qml/compiler/qqmlirbuilder_p.h
index 95756845c3..2022112e07 100644
--- a/src/qml/compiler/qqmlirbuilder_p.h
+++ b/src/qml/compiler/qqmlirbuilder_p.h
@@ -622,6 +622,21 @@ private:
int _importedScriptsTemp;
};
+struct IRLoader {
+ IRLoader(const QV4::CompiledData::Unit *unit, QmlIR::Document *output);
+
+ void load();
+
+private:
+ QmlIR::Object *loadObject(const QV4::CompiledData::Object *serializedObject);
+
+ template <typename _Tp> _Tp *New() { return pool->New<_Tp>(); }
+
+ const QV4::CompiledData::Unit *unit;
+ QmlIR::Document *output;
+ QQmlJS::MemoryPool *pool;
+};
+
} // namespace QmlIR
QT_END_NAMESPACE
diff --git a/src/qml/compiler/qv4codegen.cpp b/src/qml/compiler/qv4codegen.cpp
index 7d3ad38f97..b5b70dbf45 100644
--- a/src/qml/compiler/qv4codegen.cpp
+++ b/src/qml/compiler/qv4codegen.cpp
@@ -216,6 +216,10 @@ bool Codegen::ScanFunctions::visit(VariableDeclaration *ast)
checkName(ast->name, ast->identifierToken);
if (ast->name == QLatin1String("arguments"))
_env->usesArgumentsObject = Environment::ArgumentsObjectNotUsed;
+ if (ast->readOnly && !ast->expression) {
+ _cg->throwSyntaxError(ast->identifierToken, QStringLiteral("Missing initializer in const declaration"));
+ return false;
+ }
_env->enter(ast->name.toString(), ast->expression ? Environment::VariableDefinition : Environment::VariableDeclaration);
return true;
}
diff --git a/src/qml/compiler/qv4compileddata.cpp b/src/qml/compiler/qv4compileddata.cpp
index 25b090f555..8f8d374e24 100644
--- a/src/qml/compiler/qv4compileddata.cpp
+++ b/src/qml/compiler/qv4compileddata.cpp
@@ -52,7 +52,6 @@
#include "qv4compilationunitmapper_p.h"
#include <QQmlPropertyMap>
#include <QDateTime>
-#include <QSaveFile>
#include <QFile>
#include <QFileInfo>
#include <QScopedValueRollback>
@@ -62,6 +61,7 @@
#include <private/qqmlirbuilder_p.h>
#include <QCoreApplication>
#include <QCryptographicHash>
+#include <QSaveFile>
#include <algorithm>
@@ -77,6 +77,27 @@ namespace QV4 {
namespace CompiledData {
+#ifdef V4_BOOTSTRAP
+static QString cacheFilePath(const QString &localSourcePath)
+{
+ const QString localCachePath = localSourcePath + QLatin1Char('c');
+ return localCachePath;
+}
+#else
+static QString cacheFilePath(const QUrl &url)
+{
+ const QString localSourcePath = QQmlFile::urlToLocalFileOrQrc(url);
+ const QString localCachePath = localSourcePath + QLatin1Char('c');
+ if (QFileInfo(QFileInfo(localSourcePath).dir().absolutePath()).isWritable())
+ return localCachePath;
+ QCryptographicHash fileNameHash(QCryptographicHash::Sha1);
+ fileNameHash.addData(localSourcePath.toUtf8());
+ QString directory = QStandardPaths::writableLocation(QStandardPaths::CacheLocation) + QLatin1String("/qmlcache/");
+ QDir::root().mkpath(directory);
+ return directory + QString::fromUtf8(fileNameHash.result().toHex()) + QLatin1Char('.') + QFileInfo(localCachePath).completeSuffix();
+}
+#endif
+
#ifndef V4_BOOTSTRAP
CompilationUnit::CompilationUnit()
: data(0)
@@ -329,20 +350,68 @@ bool CompilationUnit::verifyChecksum(QQmlEngine *engine,
sizeof(data->dependencyMD5Checksum)) == 0;
}
-static QString cacheFilePath(const QUrl &url)
+bool CompilationUnit::loadFromDisk(const QUrl &url, EvalISelFactory *iselFactory, QString *errorString)
{
- const QString localSourcePath = QQmlFile::urlToLocalFileOrQrc(url);
- const QString localCachePath = localSourcePath + QLatin1Char('c');
- if (QFileInfo(QFileInfo(localSourcePath).dir().absolutePath()).isWritable())
- return localCachePath;
- QCryptographicHash fileNameHash(QCryptographicHash::Sha1);
- fileNameHash.addData(localSourcePath.toUtf8());
- QString directory = QStandardPaths::writableLocation(QStandardPaths::CacheLocation) + QLatin1String("/qmlcache/");
- QDir::root().mkpath(directory);
- return directory + QString::fromUtf8(fileNameHash.result().toHex()) + QLatin1Char('.') + QFileInfo(localCachePath).completeSuffix();
+ if (!QQmlFile::isLocalFile(url)) {
+ *errorString = QStringLiteral("File has to be a local file.");
+ return false;
+ }
+
+ const QString sourcePath = QQmlFile::urlToLocalFileOrQrc(url);
+ QScopedPointer<CompilationUnitMapper> cacheFile(new CompilationUnitMapper());
+
+ CompiledData::Unit *mappedUnit = cacheFile->open(cacheFilePath(url), sourcePath, errorString);
+ if (!mappedUnit)
+ return false;
+
+ const Unit * const oldDataPtr = (data && !(data->flags & QV4::CompiledData::Unit::StaticData)) ? data : nullptr;
+ QScopedValueRollback<const Unit *> dataPtrChange(data, mappedUnit);
+
+ if (sourcePath != QQmlFile::urlToLocalFileOrQrc(stringAt(data->sourceFileIndex))) {
+ *errorString = QStringLiteral("QML source file has moved to a different location.");
+ return false;
+ }
+
+ {
+ const QString foundArchitecture = stringAt(data->architectureIndex);
+ const QString expectedArchitecture = QSysInfo::buildAbi();
+ if (foundArchitecture != expectedArchitecture) {
+ *errorString = QString::fromUtf8("Architecture mismatch. Found %1 expected %2").arg(foundArchitecture).arg(expectedArchitecture);
+ return false;
+ }
+ }
+
+ {
+ const QString foundCodeGenerator = stringAt(data->codeGeneratorIndex);
+ const QString expectedCodeGenerator = iselFactory->codeGeneratorName;
+ if (foundCodeGenerator != expectedCodeGenerator) {
+ *errorString = QString::fromUtf8("Code generator mismatch. Found code generated by %1 but expected %2").arg(foundCodeGenerator).arg(expectedCodeGenerator);
+ return false;
+ }
+ }
+
+ if (!memoryMapCode(errorString))
+ return false;
+
+ dataPtrChange.commit();
+ free(const_cast<Unit*>(oldDataPtr));
+ backingFile.reset(cacheFile.take());
+ return true;
+}
+
+bool CompilationUnit::memoryMapCode(QString *errorString)
+{
+ *errorString = QStringLiteral("Missing code mapping backend");
+ return false;
}
+#endif // V4_BOOTSTRAP
+
+#if defined(V4_BOOTSTRAP)
+bool CompilationUnit::saveToDisk(const QString &unitUrl, QString *errorString)
+#else
bool CompilationUnit::saveToDisk(const QUrl &unitUrl, QString *errorString)
+#endif
{
errorString->clear();
@@ -351,10 +420,12 @@ bool CompilationUnit::saveToDisk(const QUrl &unitUrl, QString *errorString)
return false;
}
+#if !defined(V4_BOOTSTRAP)
if (!QQmlFile::isLocalFile(unitUrl)) {
*errorString = QStringLiteral("File has to be a local file.");
return false;
}
+#endif
// Foo.qml -> Foo.qmlc
QSaveFile cacheFile(cacheFilePath(unitUrl));
@@ -390,78 +461,105 @@ bool CompilationUnit::saveToDisk(const QUrl &unitUrl, QString *errorString)
return true;
}
-bool CompilationUnit::loadFromDisk(const QUrl &url, EvalISelFactory *iselFactory, QString *errorString)
+void CompilationUnit::prepareCodeOffsetsForDiskStorage(Unit *unit)
{
- if (!QQmlFile::isLocalFile(url)) {
- *errorString = QStringLiteral("File has to be a local file.");
- return false;
- }
+ Q_UNUSED(unit);
+}
- const QString sourcePath = QQmlFile::urlToLocalFileOrQrc(url);
- QScopedPointer<CompilationUnitMapper> cacheFile(new CompilationUnitMapper());
+bool CompilationUnit::saveCodeToDisk(QIODevice *device, const Unit *unit, QString *errorString)
+{
+ Q_UNUSED(device);
+ Q_UNUSED(unit);
+ *errorString = QStringLiteral("Saving code to disk is not supported in this configuration");
+ return false;
+}
- CompiledData::Unit *mappedUnit = cacheFile->open(cacheFilePath(url), sourcePath, errorString);
- if (!mappedUnit)
- return false;
+Unit *CompilationUnit::createUnitData(QmlIR::Document *irDocument)
+{
+ if (!irDocument->javaScriptCompilationUnit->data)
+ return irDocument->jsGenerator.generateUnit(QV4::Compiler::JSUnitGenerator::GenerateWithoutStringTable);
- const Unit * const oldDataPtr = (data && !(data->flags & QV4::CompiledData::Unit::StaticData)) ? data : nullptr;
- QScopedValueRollback<const Unit *> dataPtrChange(data, mappedUnit);
+ QQmlRefPointer<QV4::CompiledData::CompilationUnit> compilationUnit = irDocument->javaScriptCompilationUnit;
+ QV4::CompiledData::Unit *jsUnit = const_cast<QV4::CompiledData::Unit*>(irDocument->javaScriptCompilationUnit->data);
- if (sourcePath != QQmlFile::urlToLocalFileOrQrc(stringAt(data->sourceFileIndex))) {
- *errorString = QStringLiteral("QML source file has moved to a different location.");
- return false;
- }
+ QV4::Compiler::StringTableGenerator &stringTable = irDocument->jsGenerator.stringTable;
- {
- const QString foundArchitecture = stringAt(data->architectureIndex);
- const QString expectedArchitecture = QSysInfo::buildAbi();
- if (foundArchitecture != expectedArchitecture) {
- *errorString = QString::fromUtf8("Architecture mismatch. Found %1 expected %2").arg(foundArchitecture).arg(expectedArchitecture);
- return false;
+ // Collect signals that have had a change in signature (from onClicked to onClicked(mouse) for example)
+ // and now need fixing in the QV4::CompiledData. Also register strings at the same time, to finalize
+ // the string table.
+ QVector<quint32> changedSignals;
+ QVector<QQmlJS::AST::FormalParameterList*> changedSignalParameters;
+ for (QmlIR::Object *o: qAsConst(irDocument->objects)) {
+ for (QmlIR::Binding *binding = o->firstBinding(); binding; binding = binding->next) {
+ if (!(binding->flags & QV4::CompiledData::Binding::IsSignalHandlerExpression))
+ continue;
+
+ quint32 functionIndex = binding->value.compiledScriptIndex;
+ QmlIR::CompiledFunctionOrExpression *foe = o->functionsAndExpressions->slowAt(functionIndex);
+ if (!foe)
+ continue;
+
+ // save absolute index
+ changedSignals << o->runtimeFunctionIndices.at(functionIndex);
+
+ Q_ASSERT(foe->node);
+ Q_ASSERT(QQmlJS::AST::cast<QQmlJS::AST::FunctionDeclaration*>(foe->node));
+
+ QQmlJS::AST::FormalParameterList *parameters = QQmlJS::AST::cast<QQmlJS::AST::FunctionDeclaration*>(foe->node)->formals;
+ changedSignalParameters << parameters;
+
+ for (; parameters; parameters = parameters->next)
+ stringTable.registerString(parameters->name.toString());
}
}
- {
- const QString foundCodeGenerator = stringAt(data->codeGeneratorIndex);
- const QString expectedCodeGenerator = iselFactory->codeGeneratorName;
- if (foundCodeGenerator != expectedCodeGenerator) {
- *errorString = QString::fromUtf8("Code generator mismatch. Found code generated by %1 but expected %2").arg(foundCodeGenerator).arg(expectedCodeGenerator);
- return false;
+ QVector<quint32> signalParameterNameTable;
+ quint32 signalParameterNameTableOffset = jsUnit->unitSize;
+
+ // Update signal signatures
+ if (!changedSignals.isEmpty()) {
+ if (jsUnit == compilationUnit->data) {
+ char *unitCopy = (char*)malloc(jsUnit->unitSize);
+ memcpy(unitCopy, jsUnit, jsUnit->unitSize);
+ jsUnit = reinterpret_cast<QV4::CompiledData::Unit*>(unitCopy);
}
- }
- if (!memoryMapCode(errorString))
- return false;
+ for (int i = 0; i < changedSignals.count(); ++i) {
+ const uint functionIndex = changedSignals.at(i);
+ // The data is now read-write due to the copy above, so the const_cast is ok.
+ QV4::CompiledData::Function *function = const_cast<QV4::CompiledData::Function *>(jsUnit->functionAt(functionIndex));
+ Q_ASSERT(function->nFormals == quint32(0));
- dataPtrChange.commit();
- free(const_cast<Unit*>(oldDataPtr));
- backingFile.reset(cacheFile.take());
- return true;
-}
+ function->formalsOffset = signalParameterNameTableOffset - jsUnit->functionOffsetTable()[functionIndex];
-void CompilationUnit::prepareCodeOffsetsForDiskStorage(Unit *unit)
-{
- Q_UNUSED(unit);
-}
+ for (QQmlJS::AST::FormalParameterList *parameters = changedSignalParameters.at(i);
+ parameters; parameters = parameters->next) {
+ signalParameterNameTable.append(stringTable.getStringId(parameters->name.toString()));
+ function->nFormals = function->nFormals + 1;
+ }
-bool CompilationUnit::saveCodeToDisk(QIODevice *device, const Unit *unit, QString *errorString)
-{
- Q_UNUSED(device);
- Q_UNUSED(unit);
- *errorString = QStringLiteral("Saving code to disk is not supported in this configuration");
- return false;
-}
+ // Hack to ensure an activation is created.
+ function->flags |= QV4::CompiledData::Function::HasCatchOrWith | QV4::CompiledData::Function::HasDirectEval;
-bool CompilationUnit::memoryMapCode(QString *errorString)
-{
- *errorString = QStringLiteral("Missing code mapping backend");
- return false;
-}
-#endif // V4_BOOTSTRAP
+ signalParameterNameTableOffset += function->nFormals * sizeof(quint32);
+ }
+ }
-Unit *CompilationUnit::createUnitData(QmlIR::Document *irDocument)
-{
- return irDocument->jsGenerator.generateUnit(QV4::Compiler::JSUnitGenerator::GenerateWithoutStringTable);
+ if (!signalParameterNameTable.isEmpty()) {
+ Q_ASSERT(jsUnit != compilationUnit->data);
+ const uint signalParameterTableSize = signalParameterNameTable.count() * sizeof(quint32);
+ uint newSize = jsUnit->unitSize + signalParameterTableSize;
+ const uint oldSize = jsUnit->unitSize;
+ char *unitWithSignalParameters = (char*)realloc(jsUnit, newSize);
+ memcpy(unitWithSignalParameters + oldSize, signalParameterNameTable.constData(), signalParameterTableSize);
+ jsUnit = reinterpret_cast<QV4::CompiledData::Unit*>(unitWithSignalParameters);
+ jsUnit->unitSize = newSize;
+ }
+
+ if (jsUnit != compilationUnit->data)
+ jsUnit->flags &= ~QV4::CompiledData::Unit::StaticData;
+
+ return jsUnit;
}
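
createUnitData() above grows the serialized unit with realloc() and appends the collected signal parameter name ids as a trailing quint32 table, pointing each patched function's formalsOffset into it. The grow-and-append step in isolation, assuming nothing but a size field in the header (Header and appendTable are invented names):

    #include <QVector>
    #include <QtGlobal>
    #include <cstdlib>
    #include <cstring>

    struct Header { quint32 unitSize; };     // stand-in for CompiledData::Unit

    // Append a table of 32-bit ids behind an existing malloc'ed blob and bump
    // the recorded size; the blob may move, so callers must use the result.
    Header *appendTable(Header *blob, const QVector<quint32> &table)
    {
        const quint32 oldSize = blob->unitSize;
        const quint32 tableSize = table.count() * sizeof(quint32);
        char *grown = static_cast<char *>(std::realloc(blob, oldSize + tableSize));
        std::memcpy(grown + oldSize, table.constData(), tableSize);
        Header *result = reinterpret_cast<Header *>(grown);
        result->unitSize = oldSize + tableSize;
        return result;
    }
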
QString Binding::valueAsString(const Unit *unit) const
@@ -624,7 +722,7 @@ static QByteArray ownLibraryChecksum()
if (checksumInitialized)
return libraryChecksum;
checksumInitialized = true;
-#if defined(Q_OS_UNIX) && !defined(QT_NO_DYNAMIC_CAST)
+#if defined(Q_OS_UNIX) && !defined(QT_NO_DYNAMIC_CAST) && !defined(Q_OS_INTEGRITY)
Dl_info libInfo;
if (dladdr(reinterpret_cast<const void *>(&ownLibraryChecksum), &libInfo) != 0) {
QFile library(QFile::decodeName(libInfo.dli_fname));
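
ownLibraryChecksum() now also skips the dladdr() path on INTEGRITY, where that loader introspection is unavailable. The Unix branch locates the shared object the function itself lives in and hashes the file; a reduced sketch (hash algorithm and error handling are arbitrary choices for the sketch):

    #include <QCryptographicHash>
    #include <QFile>
    #include <dlfcn.h>

    static QByteArray hashOfOwnLibrary()
    {
        Dl_info info;
        // Ask the dynamic loader which object this function lives in.
        if (dladdr(reinterpret_cast<const void *>(&hashOfOwnLibrary), &info) == 0)
            return QByteArray();
        QFile library(QFile::decodeName(info.dli_fname));
        if (!library.open(QIODevice::ReadOnly))
            return QByteArray();
        QCryptographicHash hash(QCryptographicHash::Md5);   // algorithm picked for the sketch
        hash.addData(&library);                             // hashes the whole file
        return hash.result();
    }
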
diff --git a/src/qml/compiler/qv4compileddata_p.h b/src/qml/compiler/qv4compileddata_p.h
index 97623b4d86..13a0c4b075 100644
--- a/src/qml/compiler/qv4compileddata_p.h
+++ b/src/qml/compiler/qv4compileddata_p.h
@@ -71,7 +71,7 @@
QT_BEGIN_NAMESPACE
// Bump this whenever the compiler data structures change in an incompatible way.
-#define QV4_DATA_STRUCTURE_VERSION 0x08
+#define QV4_DATA_STRUCTURE_VERSION 0x09
class QIODevice;
class QQmlPropertyCache;
@@ -133,7 +133,7 @@ struct Location
QJsonPrivate::qle_bitfield<20, 12> column;
};
- Location() { line = 0; column = 0; }
+ Location() { line.val = 0; column.val = 0; }
inline bool operator<(const Location &other) const {
return line < other.line ||
@@ -153,7 +153,7 @@ struct RegExp
QJsonPrivate::qle_bitfield<4, 28> stringIndex;
};
- RegExp() { flags = 0; stringIndex = 0; }
+ RegExp() { flags.val = 0; stringIndex.val = 0; }
};
struct Lookup
@@ -171,7 +171,7 @@ struct Lookup
QJsonPrivate::qle_bitfield<4, 28> nameIndex;
};
- Lookup() { type_and_flags = 0; nameIndex = 0; }
+ Lookup() { type_and_flags.val = 0; nameIndex.val = 0; }
};
struct JSClassMember
@@ -625,7 +625,8 @@ struct Unit
StaticData = 0x4, // Unit data persistent in memory?
IsSingleton = 0x8,
IsSharedLibrary = 0x10, // .pragma shared?
- ContainsMachineCode = 0x20 // used to determine if we need to mmap with execute permissions
+ ContainsMachineCode = 0x20, // used to determine if we need to mmap with execute permissions
+ PendingTypeCompilation = 0x40 // the QML data structures present are incomplete and require type compilation
};
LEUInt32 flags;
LEUInt32 stringTableSize;
@@ -777,31 +778,7 @@ struct TypeReferenceMap : QHash<int, TypeReference>
};
#ifndef V4_BOOTSTRAP
-struct ResolvedTypeReference
-{
- ResolvedTypeReference()
- : type(0)
- , majorVersion(0)
- , minorVersion(0)
- , isFullyDynamicType(false)
- {}
-
- QQmlType *type;
- QQmlRefPointer<QQmlPropertyCache> typePropertyCache;
- QQmlRefPointer<QV4::CompiledData::CompilationUnit> compilationUnit;
-
- int majorVersion;
- int minorVersion;
- // Types such as QQmlPropertyMap can add properties dynamically at run-time and
- // therefore cannot have a property cache installed when instantiated.
- bool isFullyDynamicType;
-
- QQmlPropertyCache *propertyCache() const;
- QQmlPropertyCache *createPropertyCache(QQmlEngine *);
- bool addToHash(QCryptographicHash *hash, QQmlEngine *engine);
-
- void doDynamicTypeCheck();
-};
+struct ResolvedTypeReference;
// map from name index
// While this could be a hash, a map is chosen here to provide a stable
// order, which is used to calculate a check-sum on dependent meta-objects.
@@ -841,10 +818,14 @@ struct Q_QML_PRIVATE_EXPORT CompilationUnit : public QQmlRefCount
#ifndef V4_BOOTSTRAP
ExecutionEngine *engine;
+#endif
+
+ QV4::Heap::String **runtimeStrings; // Array
+
+#ifndef V4_BOOTSTRAP
QString fileName() const { return data->stringAt(data->sourceFileIndex); }
QUrl url() const { if (m_url.isNull) m_url = QUrl(fileName()); return m_url; }
- QV4::Heap::String **runtimeStrings; // Array
QV4::Lookup *runtimeLookups;
QV4::Value *runtimeRegularExpressions;
QV4::InternalClass **runtimeClasses;
@@ -918,17 +899,53 @@ struct Q_QML_PRIVATE_EXPORT CompilationUnit : public QQmlRefCount
void destroy() Q_DECL_OVERRIDE;
- bool saveToDisk(const QUrl &unitUrl, QString *errorString);
bool loadFromDisk(const QUrl &url, EvalISelFactory *iselFactory, QString *errorString);
protected:
virtual void linkBackendToEngine(QV4::ExecutionEngine *engine) = 0;
- virtual void prepareCodeOffsetsForDiskStorage(CompiledData::Unit *unit);
- virtual bool saveCodeToDisk(QIODevice *device, const CompiledData::Unit *unit, QString *errorString);
virtual bool memoryMapCode(QString *errorString);
#endif // V4_BOOTSTRAP
+
+public:
+#if defined(V4_BOOTSTRAP)
+ bool saveToDisk(const QString &unitUrl, QString *errorString);
+#else
+ bool saveToDisk(const QUrl &unitUrl, QString *errorString);
+#endif
+
+protected:
+ virtual void prepareCodeOffsetsForDiskStorage(CompiledData::Unit *unit);
+ virtual bool saveCodeToDisk(QIODevice *device, const CompiledData::Unit *unit, QString *errorString);
};
+#ifndef V4_BOOTSTRAP
+struct ResolvedTypeReference
+{
+ ResolvedTypeReference()
+ : type(0)
+ , majorVersion(0)
+ , minorVersion(0)
+ , isFullyDynamicType(false)
+ {}
+
+ QQmlType *type;
+ QQmlRefPointer<QQmlPropertyCache> typePropertyCache;
+ QQmlRefPointer<QV4::CompiledData::CompilationUnit> compilationUnit;
+
+ int majorVersion;
+ int minorVersion;
+ // Types such as QQmlPropertyMap can add properties dynamically at run-time and
+ // therefore cannot have a property cache installed when instantiated.
+ bool isFullyDynamicType;
+
+ QQmlPropertyCache *propertyCache() const;
+ QQmlPropertyCache *createPropertyCache(QQmlEngine *);
+ bool addToHash(QCryptographicHash *hash, QQmlEngine *engine);
+
+ void doDynamicTypeCheck();
+};
+#endif
+
}
}
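
The Location, RegExp and Lookup constructor changes in this header write the unions' raw val word directly instead of assigning through the bitfield wrappers: an assignment through such a wrapper is a read-modify-write, and in a constructor that read touches storage nothing has initialized yet. A reduced illustration with a made-up BitField standing in for QJsonPrivate::qle_bitfield:

    #include <QtGlobal>

    // Hypothetical stand-in for QJsonPrivate::qle_bitfield: a field packed into
    // a shared 32-bit word.
    template <int pos, int width>
    struct BitField {
        quint32 val;
        void operator=(quint32 t)
        {
            const quint32 mask = ((1u << width) - 1) << pos;
            val = (val & ~mask) | (t << pos);   // reads val before writing it
        }
        operator quint32() const { return (val >> pos) & ((1u << width) - 1); }
    };

    struct Location {
        union {
            BitField<0, 20> line;
            BitField<20, 12> column;
        };
        // "line = 0; column = 0;" would read the shared word before anything
        // wrote it; clearing the raw storage avoids that.
        Location() { line.val = 0; column.val = 0; }
    };
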
diff --git a/src/qml/compiler/qv4instr_moth_p.h b/src/qml/compiler/qv4instr_moth_p.h
index ca4e0b73d4..53d9956315 100644
--- a/src/qml/compiler/qv4instr_moth_p.h
+++ b/src/qml/compiler/qv4instr_moth_p.h
@@ -52,10 +52,11 @@
//
#include <private/qv4global_p.h>
#include <private/qv4value_p.h>
-#include <private/qv4function_p.h>
#include <private/qv4runtime_p.h>
+#if !defined(V4_BOOTSTRAP)
QT_REQUIRE_CONFIG(qml_interpreter);
+#endif
QT_BEGIN_NAMESPACE
diff --git a/src/qml/compiler/qv4isel_moth.cpp b/src/qml/compiler/qv4isel_moth.cpp
index 9dbebd1128..04844302d9 100644
--- a/src/qml/compiler/qv4isel_moth.cpp
+++ b/src/qml/compiler/qv4isel_moth.cpp
@@ -39,15 +39,15 @@
#include "qv4isel_util_p.h"
#include "qv4isel_moth_p.h"
-#include "qv4vme_moth_p.h"
#include "qv4ssa_p.h"
-#include <private/qv4debugging_p.h>
-#include <private/qv4function_p.h>
-#include <private/qv4regexpobject_p.h>
#include <private/qv4compileddata_p.h>
-#include <private/qqmlengine_p.h>
#include <wtf/MathExtras.h>
+#if !defined(V4_BOOTSTRAP)
+#include "qv4vme_moth_p.h"
+#include <private/qv4function_p.h>
+#endif
+
#undef USE_TYPE_INFO
using namespace QV4;
@@ -1185,8 +1185,11 @@ void InstructionSelection::callBuiltinPushWithScope(IR::Expr *arg)
void InstructionSelection::callBuiltinPopScope()
{
+ QT_WARNING_PUSH
+ QT_WARNING_DISABLE_GCC("-Wuninitialized")
Instruction::CallBuiltinPopScope call;
addInstruction(call);
+ QT_WARNING_POP
}
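
callBuiltinPopScope() here (and callBuiltinConvertThisToObject() further down) now brackets the instruction declaration with QT_WARNING_PUSH, QT_WARNING_DISABLE_GCC("-Wuninitialized") and QT_WARNING_POP: the instruction carries no payload for the caller to fill in, and GCC otherwise warns when the uninitialized struct is copied. The macros in isolation, with made-up Opcode and emit names:

    #include <QtGlobal>

    struct Opcode { int type; };             // stand-in; nothing for the caller to set

    void emit(const Opcode &) {}             // placeholder sink

    void emitPop()
    {
        QT_WARNING_PUSH
        QT_WARNING_DISABLE_GCC("-Wuninitialized")
        Opcode op;                           // intentionally left uninitialized
        emit(op);                            // the warning stays off for this copy only
        QT_WARNING_POP
    }
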
void InstructionSelection::callBuiltinDeclareVar(bool deletable, const QString &name)
@@ -1335,8 +1338,11 @@ void InstructionSelection::callBuiltinSetupArgumentObject(IR::Expr *result)
void QV4::Moth::InstructionSelection::callBuiltinConvertThisToObject()
{
+ QT_WARNING_PUSH
+ QT_WARNING_DISABLE_GCC("-Wuninitialized")
Instruction::CallBuiltinConvertThisToObject call;
addInstruction(call);
+ QT_WARNING_POP
}
ptrdiff_t InstructionSelection::addInstructionHelper(Instr::Type type, Instr &instr)
@@ -1425,6 +1431,8 @@ CompilationUnit::~CompilationUnit()
{
}
+#if !defined(V4_BOOTSTRAP)
+
void CompilationUnit::linkBackendToEngine(QV4::ExecutionEngine *engine)
{
#ifdef MOTH_THREADED_INTERPRETER
@@ -1461,6 +1469,31 @@ void CompilationUnit::linkBackendToEngine(QV4::ExecutionEngine *engine)
}
}
+bool CompilationUnit::memoryMapCode(QString *errorString)
+{
+ Q_UNUSED(errorString);
+ codeRefs.resize(data->functionTableSize);
+
+ const char *basePtr = reinterpret_cast<const char *>(data);
+
+ for (uint i = 0; i < data->functionTableSize; ++i) {
+ const CompiledData::Function *compiledFunction = data->functionAt(i);
+ const char *codePtr = const_cast<const char *>(reinterpret_cast<const char *>(basePtr + compiledFunction->codeOffset));
+#ifdef MOTH_THREADED_INTERPRETER
+ // for the threaded interpreter we need to make a copy of the data because it needs to be
+ // modified for the instruction handler addresses.
+ QByteArray code(codePtr, compiledFunction->codeSize);
+#else
+ QByteArray code = QByteArray::fromRawData(codePtr, compiledFunction->codeSize);
+#endif
+ codeRefs[i] = code;
+ }
+
+ return true;
+}
+
+#endif // V4_BOOTSTRAP
+
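
memoryMapCode() above either deep-copies each function's bytecode (threaded interpreter, where opcodes are rewritten into handler addresses) or wraps it zero-copy with QByteArray::fromRawData(), leaving the bytes in the mapped cache file. The two choices side by side, with a made-up buffer layout:

    #include <QByteArray>

    // Toy helper; offsets, sizes and the patching flag are invented here.
    QByteArray wrapCode(const char *mappedBase, int offset, int size, bool willPatchInPlace)
    {
        const char *codePtr = mappedBase + offset;
        if (willPatchInPlace) {
            // Deep copy: the bytes will be modified, so they must not alias the
            // read-only file mapping.
            return QByteArray(codePtr, size);
        }
        // Zero-copy view: no allocation, but the mapping must outlive the result.
        return QByteArray::fromRawData(codePtr, size);
    }
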
void CompilationUnit::prepareCodeOffsetsForDiskStorage(CompiledData::Unit *unit)
{
const int codeAlignment = 16;
@@ -1482,7 +1515,7 @@ bool CompilationUnit::saveCodeToDisk(QIODevice *device, const CompiledData::Unit
QByteArray padding;
-#ifdef MOTH_THREADED_INTERPRETER
+#if defined(MOTH_THREADED_INTERPRETER) && !defined(V4_BOOTSTRAP)
// Map from instruction label back to instruction type. Only needed when persisting
// already linked compilation units.
QHash<void*, int> reverseInstructionMapping;
@@ -1511,7 +1544,7 @@ bool CompilationUnit::saveCodeToDisk(QIODevice *device, const CompiledData::Unit
QByteArray code = codeRefs.at(i);
-#ifdef MOTH_THREADED_INTERPRETER
+#if defined(MOTH_THREADED_INTERPRETER) && !defined(V4_BOOTSTRAP)
if (!reverseInstructionMapping.isEmpty()) {
char *codePtr = code.data(); // detaches
int index = 0;
@@ -1541,29 +1574,6 @@ bool CompilationUnit::saveCodeToDisk(QIODevice *device, const CompiledData::Unit
return true;
}
-bool CompilationUnit::memoryMapCode(QString *errorString)
-{
- Q_UNUSED(errorString);
- codeRefs.resize(data->functionTableSize);
-
- const char *basePtr = reinterpret_cast<const char *>(data);
-
- for (uint i = 0; i < data->functionTableSize; ++i) {
- const CompiledData::Function *compiledFunction = data->functionAt(i);
- const char *codePtr = const_cast<const char *>(reinterpret_cast<const char *>(basePtr + compiledFunction->codeOffset));
-#ifdef MOTH_THREADED_INTERPRETER
- // for the threaded interpreter we need to make a copy of the data because it needs to be
- // modified for the instruction handler addresses.
- QByteArray code(codePtr, compiledFunction->codeSize);
-#else
- QByteArray code = QByteArray::fromRawData(codePtr, compiledFunction->codeSize);
-#endif
- codeRefs[i] = code;
- }
-
- return true;
-}
-
QQmlRefPointer<CompiledData::CompilationUnit> ISelFactory::createUnitForLoading()
{
QQmlRefPointer<CompiledData::CompilationUnit> result;
diff --git a/src/qml/compiler/qv4isel_moth_p.h b/src/qml/compiler/qv4isel_moth_p.h
index afe5fe342e..41469f1985 100644
--- a/src/qml/compiler/qv4isel_moth_p.h
+++ b/src/qml/compiler/qv4isel_moth_p.h
@@ -59,7 +59,9 @@
#include <private/qv4value_p.h>
#include "qv4instr_moth_p.h"
+#if !defined(V4_BOOTSTRAP)
QT_REQUIRE_CONFIG(qml_interpreter);
+#endif
QT_BEGIN_NAMESPACE
@@ -69,10 +71,12 @@ namespace Moth {
struct CompilationUnit : public QV4::CompiledData::CompilationUnit
{
virtual ~CompilationUnit();
+#if !defined(V4_BOOTSTRAP)
void linkBackendToEngine(QV4::ExecutionEngine *engine) Q_DECL_OVERRIDE;
+ bool memoryMapCode(QString *errorString) Q_DECL_OVERRIDE;
+#endif
void prepareCodeOffsetsForDiskStorage(CompiledData::Unit *unit) Q_DECL_OVERRIDE;
bool saveCodeToDisk(QIODevice *device, const CompiledData::Unit *unit, QString *errorString) Q_DECL_OVERRIDE;
- bool memoryMapCode(QString *errorString) Q_DECL_OVERRIDE;
QVector<QByteArray> codeRefs;
diff --git a/src/qml/compiler/qv4jsir_p.h b/src/qml/compiler/qv4jsir_p.h
index 73aa6c4975..04bc3d86e5 100644
--- a/src/qml/compiler/qv4jsir_p.h
+++ b/src/qml/compiler/qv4jsir_p.h
@@ -315,20 +315,20 @@ struct Q_AUTOTEST_EXPORT Expr {
Expr(ExprKind exprKind): type(UnknownType), exprKind(exprKind) {}
bool isLValue() const;
- Const *asConst() { return as<Const>(); }
- String *asString() { return as<String>(); }
- RegExp *asRegExp() { return as<RegExp>(); }
- Name *asName() { return as<Name>(); }
- Temp *asTemp() { return as<Temp>(); }
- ArgLocal *asArgLocal() { return as<ArgLocal>(); }
- Closure *asClosure() { return as<Closure>(); }
- Convert *asConvert() { return as<Convert>(); }
- Unop *asUnop() { return as<Unop>(); }
- Binop *asBinop() { return as<Binop>(); }
- Call *asCall() { return as<Call>(); }
- New *asNew() { return as<New>(); }
- Subscript *asSubscript() { return as<Subscript>(); }
- Member *asMember() { return as<Member>(); }
+ Const *asConst();
+ String *asString();
+ RegExp *asRegExp();
+ Name *asName();
+ Temp *asTemp();
+ ArgLocal *asArgLocal();
+ Closure *asClosure();
+ Convert *asConvert();
+ Unop *asUnop();
+ Binop *asBinop();
+ Call *asCall();
+ New *asNew();
+ Subscript *asSubscript();
+ Member *asMember();
};
#define EXPR_VISIT_ALL_KINDS(e) \
@@ -773,12 +773,12 @@ struct Stmt {
Stmt *asTerminator();
- Exp *asExp() { return as<Exp>(); }
- Move *asMove() { return as<Move>(); }
- Jump *asJump() { return as<Jump>(); }
- CJump *asCJump() { return as<CJump>(); }
- Ret *asRet() { return as<Ret>(); }
- Phi *asPhi() { return as<Phi>(); }
+ Exp *asExp();
+ Move *asMove();
+ Jump *asJump();
+ CJump *asCJump();
+ Ret *asRet();
+ Phi *asPhi();
int id() const { return _id; }
@@ -1720,6 +1720,28 @@ inline Stmt *BasicBlock::RET(Expr *expr)
return s;
}
+inline Const *Expr::asConst() { return as<Const>(); }
+inline String *Expr::asString() { return as<String>(); }
+inline RegExp *Expr::asRegExp() { return as<RegExp>(); }
+inline Name *Expr::asName() { return as<Name>(); }
+inline Temp *Expr::asTemp() { return as<Temp>(); }
+inline ArgLocal *Expr::asArgLocal() { return as<ArgLocal>(); }
+inline Closure *Expr::asClosure() { return as<Closure>(); }
+inline Convert *Expr::asConvert() { return as<Convert>(); }
+inline Unop *Expr::asUnop() { return as<Unop>(); }
+inline Binop *Expr::asBinop() { return as<Binop>(); }
+inline Call *Expr::asCall() { return as<Call>(); }
+inline New *Expr::asNew() { return as<New>(); }
+inline Subscript *Expr::asSubscript() { return as<Subscript>(); }
+inline Member *Expr::asMember() { return as<Member>(); }
+
+inline Exp *Stmt::asExp() { return as<Exp>(); }
+inline Move *Stmt::asMove() { return as<Move>(); }
+inline Jump *Stmt::asJump() { return as<Jump>(); }
+inline CJump *Stmt::asCJump() { return as<CJump>(); }
+inline Ret *Stmt::asRet() { return as<Ret>(); }
+inline Phi *Stmt::asPhi() { return as<Phi>(); }
+
} // end of namespace IR
} // end of namespace QV4
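
The qv4jsir_p.h hunks turn asConst(), asTemp() and the other downcast helpers into plain declarations inside Expr and Stmt, with their inline definitions gathered at the end of the header once every subclass has been fully defined. That is the usual way for a base class to offer downcast helpers to types that are still incomplete where the base is declared; in miniature, with invented names and the kind check reduced to a virtual:

    struct Derived;                          // incomplete where Base is declared

    struct Base {
        virtual ~Base() {}
        Derived *asDerived();                // declaration only
        virtual bool isDerived() const { return false; }
    };

    struct Derived : Base {
        bool isDerived() const override { return true; }
        int payload = 0;
    };

    // Defined after Derived is complete, so the cast is well-formed.
    inline Derived *Base::asDerived()
    {
        return isDerived() ? static_cast<Derived *>(this) : nullptr;
    }
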
diff --git a/src/qml/compiler/qv4ssa.cpp b/src/qml/compiler/qv4ssa.cpp
index 1d512711b8..10f0bbcf8f 100644
--- a/src/qml/compiler/qv4ssa.cpp
+++ b/src/qml/compiler/qv4ssa.cpp
@@ -5123,7 +5123,7 @@ void LifeTimeInterval::setFrom(int from) {
Q_ASSERT(from > 0);
if (_ranges.isEmpty()) { // this is the case where there is no use, only a define
- _ranges.prepend(Range(from, from));
+ _ranges.prepend(LifeTimeIntervalRange(from, from));
if (_end == InvalidPosition)
_end = from;
} else {
@@ -5137,17 +5137,17 @@ void LifeTimeInterval::addRange(int from, int to) {
Q_ASSERT(to >= from);
if (_ranges.isEmpty()) {
- _ranges.prepend(Range(from, to));
+ _ranges.prepend(LifeTimeIntervalRange(from, to));
_end = to;
return;
}
- Range *p = &_ranges.first();
+ LifeTimeIntervalRange *p = &_ranges.first();
if (to + 1 >= p->start && p->end + 1 >= from) {
p->start = qMin(p->start, from);
p->end = qMax(p->end, to);
while (_ranges.count() > 1) {
- Range *p1 = p + 1;
+ LifeTimeIntervalRange *p1 = p + 1;
if (p->end + 1 < p1->start || p1->end + 1 < p->start)
break;
p1->start = qMin(p->start, p1->start);
@@ -5157,10 +5157,10 @@ void LifeTimeInterval::addRange(int from, int to) {
}
} else {
if (to < p->start) {
- _ranges.prepend(Range(from, to));
+ _ranges.prepend(LifeTimeIntervalRange(from, to));
} else {
Q_ASSERT(from > _ranges.last().end);
- _ranges.push_back(Range(from, to));
+ _ranges.push_back(LifeTimeIntervalRange(from, to));
}
}
@@ -5206,7 +5206,7 @@ LifeTimeInterval LifeTimeInterval::split(int atPosition, int newStart)
} else {
// find the first range where the temp will get active again:
while (!newInterval._ranges.isEmpty()) {
- const Range &range = newInterval._ranges.first();
+ const LifeTimeIntervalRange &range = newInterval._ranges.first();
if (range.start > newStart) {
// The split position is before the start of the range. Either we managed to skip
// over the correct range, or we got an invalid split request. Either way, this
diff --git a/src/qml/compiler/qv4ssa_p.h b/src/qml/compiler/qv4ssa_p.h
index db8b6edd1a..c07abd04c4 100644
--- a/src/qml/compiler/qv4ssa_p.h
+++ b/src/qml/compiler/qv4ssa_p.h
@@ -63,20 +63,28 @@ class QQmlEnginePrivate;
namespace QV4 {
namespace IR {
-class Q_AUTOTEST_EXPORT LifeTimeInterval {
-public:
- struct Range {
- int start;
- int end;
+struct LifeTimeIntervalRange {
+ int start;
+ int end;
- Range(int start = InvalidPosition, int end = InvalidPosition)
- : start(start)
- , end(end)
- {}
+ LifeTimeIntervalRange(int start = -1, int end = -1)
+ : start(start)
+ , end(end)
+ {}
- bool covers(int position) const { return start <= position && position <= end; }
- };
- typedef QVarLengthArray<Range, 4> Ranges;
+ bool covers(int position) const { return start <= position && position <= end; }
+};
+} // IR namespace
+} // QV4 namespace
+
+Q_DECLARE_TYPEINFO(QV4::IR::LifeTimeIntervalRange, Q_PRIMITIVE_TYPE);
+
+namespace QV4 {
+namespace IR {
+
+class Q_AUTOTEST_EXPORT LifeTimeInterval {
+public:
+ typedef QVarLengthArray<LifeTimeIntervalRange, 4> Ranges;
private:
Temp _temp;
@@ -137,7 +145,7 @@ public:
// Validate the new range
if (_end != InvalidPosition) {
Q_ASSERT(!_ranges.isEmpty());
- for (const Range &range : qAsConst(_ranges)) {
+ for (const LifeTimeIntervalRange &range : qAsConst(_ranges)) {
Q_ASSERT(range.start >= 0);
Q_ASSERT(range.end >= 0);
Q_ASSERT(range.start <= range.end);
@@ -457,7 +465,6 @@ protected:
Q_DECLARE_TYPEINFO(QV4::IR::LifeTimeInterval, Q_MOVABLE_TYPE);
-Q_DECLARE_TYPEINFO(QV4::IR::LifeTimeInterval::Range, Q_PRIMITIVE_TYPE);
QT_END_NAMESPACE
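
In qv4ssa_p.h the nested LifeTimeInterval::Range becomes the namespace-level LifeTimeIntervalRange, and its Q_DECLARE_TYPEINFO(..., Q_PRIMITIVE_TYPE) now sits before the LifeTimeInterval class and its QVarLengthArray<..., 4> typedef rather than at the end of the header, so the type-info hint is already in effect where the container member is declared. The shape of that pattern, with placeholder names:

    #include <QVarLengthArray>

    namespace Example {
    struct Span {                            // hoisted out of its former owner class
        int start = -1;
        int end = -1;
    };
    } // namespace Example

    // Declared before any container of Span is instantiated, so Qt containers may
    // move these elements with memcpy instead of per-element construction.
    Q_DECLARE_TYPEINFO(Example::Span, Q_PRIMITIVE_TYPE);

    namespace Example {
    class Interval {
    public:
        typedef QVarLengthArray<Span, 4> Spans;
    private:
        Spans m_spans;
    };
    } // namespace Example
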
diff --git a/src/qml/jit/qv4assembler.cpp b/src/qml/jit/qv4assembler.cpp
index 018396318e..646d9a8871 100644
--- a/src/qml/jit/qv4assembler.cpp
+++ b/src/qml/jit/qv4assembler.cpp
@@ -39,11 +39,6 @@
#include "qv4isel_masm_p.h"
#include "qv4runtime_p.h"
-#include "qv4object_p.h"
-#include "qv4functionobject_p.h"
-#include "qv4regexpobject_p.h"
-#include "qv4lookup_p.h"
-#include "qv4function_p.h"
#include "qv4ssa_p.h"
#include "qv4regalloc_p.h"
#include "qv4assembler_p.h"
@@ -51,6 +46,10 @@
#include <assembler/LinkBuffer.h>
#include <WTFStubs.h>
+#if !defined(V4_BOOTSTRAP)
+#include "qv4function_p.h"
+#endif
+
#include <iostream>
#include <QBuffer>
#include <QCoreApplication>
@@ -68,6 +67,8 @@ CompilationUnit::~CompilationUnit()
{
}
+#if !defined(V4_BOOTSTRAP)
+
void CompilationUnit::linkBackendToEngine(ExecutionEngine *engine)
{
runtimeFunctions.resize(data->functionTableSize);
@@ -81,6 +82,26 @@ void CompilationUnit::linkBackendToEngine(ExecutionEngine *engine)
}
}
+bool CompilationUnit::memoryMapCode(QString *errorString)
+{
+ Q_UNUSED(errorString);
+ codeRefs.resize(data->functionTableSize);
+
+ const char *basePtr = reinterpret_cast<const char *>(data);
+
+ for (uint i = 0; i < data->functionTableSize; ++i) {
+ const CompiledData::Function *compiledFunction = data->functionAt(i);
+ void *codePtr = const_cast<void *>(reinterpret_cast<const void *>(basePtr + compiledFunction->codeOffset));
+ JSC::MacroAssemblerCodeRef codeRef = JSC::MacroAssemblerCodeRef::createSelfManagedCodeRef(JSC::MacroAssemblerCodePtr(codePtr));
+ JSC::ExecutableAllocator::makeExecutable(codePtr, compiledFunction->codeSize);
+ codeRefs[i] = codeRef;
+ }
+
+ return true;
+}
+
+#endif // !defined(V4_BOOTSTRAP)
+
void CompilationUnit::prepareCodeOffsetsForDiskStorage(CompiledData::Unit *unit)
{
const int codeAlignment = 16;
@@ -128,27 +149,11 @@ bool CompilationUnit::saveCodeToDisk(QIODevice *device, const CompiledData::Unit
return true;
}
-bool CompilationUnit::memoryMapCode(QString *errorString)
-{
- Q_UNUSED(errorString);
- codeRefs.resize(data->functionTableSize);
-
- const char *basePtr = reinterpret_cast<const char *>(data);
-
- for (uint i = 0; i < data->functionTableSize; ++i) {
- const CompiledData::Function *compiledFunction = data->functionAt(i);
- void *codePtr = const_cast<void *>(reinterpret_cast<const void *>(basePtr + compiledFunction->codeOffset));
- JSC::MacroAssemblerCodeRef codeRef = JSC::MacroAssemblerCodeRef::createSelfManagedCodeRef(JSC::MacroAssemblerCodePtr(codePtr));
- JSC::ExecutableAllocator::makeExecutable(codePtr, compiledFunction->codeSize);
- codeRefs[i] = codeRef;
- }
-
- return true;
-}
-
-const Assembler::VoidType Assembler::Void;
+template <typename TargetConfiguration>
+const typename Assembler<TargetConfiguration>::VoidType Assembler<TargetConfiguration>::Void;
-Assembler::Assembler(QV4::Compiler::JSUnitGenerator *jsGenerator, IR::Function* function, QV4::ExecutableAllocator *executableAllocator)
+template <typename TargetConfiguration>
+Assembler<TargetConfiguration>::Assembler(QV4::Compiler::JSUnitGenerator *jsGenerator, IR::Function* function, QV4::ExecutableAllocator *executableAllocator)
: _function(function)
, _nextBlock(0)
, _executableAllocator(executableAllocator)
@@ -159,14 +164,16 @@ Assembler::Assembler(QV4::Compiler::JSUnitGenerator *jsGenerator, IR::Function*
_labelPatches.resize(_function->basicBlockCount());
}
-void Assembler::registerBlock(IR::BasicBlock* block, IR::BasicBlock *nextBlock)
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::registerBlock(IR::BasicBlock* block, IR::BasicBlock *nextBlock)
{
_addrs[block->index()] = label();
catchBlock = block->catchBlock;
_nextBlock = nextBlock;
}
-void Assembler::jumpToBlock(IR::BasicBlock* current, IR::BasicBlock *target)
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::jumpToBlock(IR::BasicBlock* current, IR::BasicBlock *target)
{
Q_UNUSED(current);
@@ -174,12 +181,14 @@ void Assembler::jumpToBlock(IR::BasicBlock* current, IR::BasicBlock *target)
_patches[target->index()].push_back(jump());
}
-void Assembler::addPatch(IR::BasicBlock* targetBlock, Jump targetJump)
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::addPatch(IR::BasicBlock* targetBlock, Jump targetJump)
{
_patches[targetBlock->index()].push_back(targetJump);
}
-void Assembler::addPatch(DataLabelPtr patch, Label target)
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::addPatch(DataLabelPtr patch, Label target)
{
DataLabelPatch p;
p.dataLabel = patch;
@@ -187,37 +196,21 @@ void Assembler::addPatch(DataLabelPtr patch, Label target)
_dataLabelPatches.push_back(p);
}
-void Assembler::addPatch(DataLabelPtr patch, IR::BasicBlock *target)
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::addPatch(DataLabelPtr patch, IR::BasicBlock *target)
{
_labelPatches[target->index()].push_back(patch);
}
-void Assembler::generateCJumpOnNonZero(RegisterID reg, IR::BasicBlock *currentBlock,
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::generateCJumpOnNonZero(RegisterID reg, IR::BasicBlock *currentBlock,
IR::BasicBlock *trueBlock, IR::BasicBlock *falseBlock)
{
- generateCJumpOnCompare(NotEqual, reg, TrustedImm32(0), currentBlock, trueBlock, falseBlock);
-}
-
-#ifdef QV4_USE_64_BIT_VALUE_ENCODING
-void Assembler::generateCJumpOnCompare(RelationalCondition cond,
- RegisterID left,
- TrustedImm64 right,
- IR::BasicBlock *currentBlock,
- IR::BasicBlock *trueBlock,
- IR::BasicBlock *falseBlock)
-{
- if (trueBlock == _nextBlock) {
- Jump target = branch64(invert(cond), left, right);
- addPatch(falseBlock, target);
- } else {
- Jump target = branch64(cond, left, right);
- addPatch(trueBlock, target);
- jumpToBlock(currentBlock, falseBlock);
- }
+ generateCJumpOnCompare(RelationalCondition::NotEqual, reg, TrustedImm32(0), currentBlock, trueBlock, falseBlock);
}
-#endif
-void Assembler::generateCJumpOnCompare(RelationalCondition cond,
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::generateCJumpOnCompare(RelationalCondition cond,
RegisterID left,
TrustedImm32 right,
IR::BasicBlock *currentBlock,
@@ -234,7 +227,8 @@ void Assembler::generateCJumpOnCompare(RelationalCondition cond,
}
}
-void Assembler::generateCJumpOnCompare(RelationalCondition cond,
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::generateCJumpOnCompare(RelationalCondition cond,
RegisterID left,
RegisterID right,
IR::BasicBlock *currentBlock,
@@ -251,7 +245,8 @@ void Assembler::generateCJumpOnCompare(RelationalCondition cond,
}
}
-Assembler::Pointer Assembler::loadAddress(RegisterID tmp, IR::Expr *e)
+template <typename TargetConfiguration>
+typename Assembler<TargetConfiguration>::Pointer Assembler<TargetConfiguration>::loadAddress(RegisterID tmp, IR::Expr *e)
{
IR::Temp *t = e->asTemp();
if (t)
@@ -260,7 +255,8 @@ Assembler::Pointer Assembler::loadAddress(RegisterID tmp, IR::Expr *e)
return loadArgLocalAddress(tmp, e->asArgLocal());
}
-Assembler::Pointer Assembler::loadTempAddress(IR::Temp *t)
+template <typename TargetConfiguration>
+typename Assembler<TargetConfiguration>::Pointer Assembler<TargetConfiguration>::loadTempAddress(IR::Temp *t)
{
if (t->kind == IR::Temp::StackSlot)
return stackSlotPointer(t);
@@ -268,7 +264,8 @@ Assembler::Pointer Assembler::loadTempAddress(IR::Temp *t)
Q_UNREACHABLE();
}
-Assembler::Pointer Assembler::loadArgLocalAddress(RegisterID baseReg, IR::ArgLocal *al)
+template <typename TargetConfiguration>
+typename Assembler<TargetConfiguration>::Pointer Assembler<TargetConfiguration>::loadArgLocalAddress(RegisterID baseReg, IR::ArgLocal *al)
{
int32_t offset = 0;
int scope = al->scope;
@@ -298,7 +295,8 @@ Assembler::Pointer Assembler::loadArgLocalAddress(RegisterID baseReg, IR::ArgLoc
return Pointer(baseReg, offset);
}
-Assembler::Pointer Assembler::loadStringAddress(RegisterID reg, const QString &string)
+template <typename TargetConfiguration>
+typename Assembler<TargetConfiguration>::Pointer Assembler<TargetConfiguration>::loadStringAddress(RegisterID reg, const QString &string)
{
loadPtr(Address(Assembler::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), Assembler::ScratchRegister);
loadPtr(Address(Assembler::ScratchRegister, qOffsetOf(QV4::Heap::ExecutionContext, compilationUnit)), Assembler::ScratchRegister);
@@ -307,12 +305,14 @@ Assembler::Pointer Assembler::loadStringAddress(RegisterID reg, const QString &s
return Pointer(reg, id * sizeof(QV4::String*));
}
-Assembler::Address Assembler::loadConstant(IR::Const *c, RegisterID baseReg)
+template <typename TargetConfiguration>
+typename Assembler<TargetConfiguration>::Address Assembler<TargetConfiguration>::loadConstant(IR::Const *c, RegisterID baseReg)
{
return loadConstant(convertToValue(c), baseReg);
}
-Assembler::Address Assembler::loadConstant(const Primitive &v, RegisterID baseReg)
+template <typename TargetConfiguration>
+typename Assembler<TargetConfiguration>::Address Assembler<TargetConfiguration>::loadConstant(const Primitive &v, RegisterID baseReg)
{
loadPtr(Address(Assembler::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), baseReg);
loadPtr(Address(baseReg, qOffsetOf(QV4::Heap::ExecutionContext, constantTable)), baseReg);
@@ -320,33 +320,36 @@ Assembler::Address Assembler::loadConstant(const Primitive &v, RegisterID baseRe
return Address(baseReg, index * sizeof(QV4::Value));
}
-void Assembler::loadStringRef(RegisterID reg, const QString &string)
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::loadStringRef(RegisterID reg, const QString &string)
{
const int id = _jsGenerator->registerString(string);
move(TrustedImm32(id), reg);
}
-void Assembler::storeValue(QV4::Primitive value, IR::Expr *destination)
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::storeValue(QV4::Primitive value, IR::Expr *destination)
{
Address addr = loadAddress(ScratchRegister, destination);
storeValue(value, addr);
}
-void Assembler::enterStandardStackFrame(const RegisterInformation &regularRegistersToSave,
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::enterStandardStackFrame(const RegisterInformation &regularRegistersToSave,
const RegisterInformation &fpRegistersToSave)
{
platformEnterStandardStackFrame(this);
- move(StackPointerRegister, FramePointerRegister);
+ move(StackPointerRegister, JITTargetPlatform::FramePointerRegister);
const int frameSize = _stackLayout->calculateStackFrameSize();
subPtr(TrustedImm32(frameSize), StackPointerRegister);
- Address slotAddr(FramePointerRegister, 0);
+ Address slotAddr(JITTargetPlatform::FramePointerRegister, 0);
for (int i = 0, ei = fpRegistersToSave.size(); i < ei; ++i) {
Q_ASSERT(fpRegistersToSave.at(i).isFloatingPoint());
slotAddr.offset -= sizeof(double);
- JSC::MacroAssembler::storeDouble(fpRegistersToSave.at(i).reg<FPRegisterID>(), slotAddr);
+ TargetConfiguration::MacroAssembler::storeDouble(fpRegistersToSave.at(i).reg<FPRegisterID>(), slotAddr);
}
for (int i = 0, ei = regularRegistersToSave.size(); i < ei; ++i) {
Q_ASSERT(regularRegistersToSave.at(i).isRegularRegister());
@@ -355,10 +358,11 @@ void Assembler::enterStandardStackFrame(const RegisterInformation &regularRegist
}
}
-void Assembler::leaveStandardStackFrame(const RegisterInformation &regularRegistersToSave,
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::leaveStandardStackFrame(const RegisterInformation &regularRegistersToSave,
const RegisterInformation &fpRegistersToSave)
{
- Address slotAddr(FramePointerRegister, -regularRegistersToSave.size() * RegisterSize - fpRegistersToSave.size() * sizeof(double));
+ Address slotAddr(JITTargetPlatform::FramePointerRegister, -regularRegistersToSave.size() * RegisterSize - fpRegistersToSave.size() * sizeof(double));
// restore the callee saved registers
for (int i = regularRegistersToSave.size() - 1; i >= 0; --i) {
@@ -368,7 +372,7 @@ void Assembler::leaveStandardStackFrame(const RegisterInformation &regularRegist
}
for (int i = fpRegistersToSave.size() - 1; i >= 0; --i) {
Q_ASSERT(fpRegistersToSave.at(i).isFloatingPoint());
- JSC::MacroAssembler::loadDouble(slotAddr, fpRegistersToSave.at(i).reg<FPRegisterID>());
+ TargetConfiguration::MacroAssembler::loadDouble(slotAddr, fpRegistersToSave.at(i).reg<FPRegisterID>());
slotAddr.offset += sizeof(double);
}
@@ -393,7 +397,8 @@ void Assembler::leaveStandardStackFrame(const RegisterInformation &regularRegist
// Try to load the source expression into the destination FP register. This assumes that two
// general purpose (integer) registers are available: the ScratchRegister and the
// ReturnValueRegister. It returns a Jump if no conversion can be performed.
-Assembler::Jump Assembler::genTryDoubleConversion(IR::Expr *src, Assembler::FPRegisterID dest)
+template <typename TargetConfiguration>
+typename Assembler<TargetConfiguration>::Jump Assembler<TargetConfiguration>::genTryDoubleConversion(IR::Expr *src, FPRegisterID dest)
{
switch (src->type) {
case IR::DoubleType:
@@ -436,11 +441,10 @@ Assembler::Jump Assembler::genTryDoubleConversion(IR::Expr *src, Assembler::FPRe
isNoInt.link(this);
#ifdef QV4_USE_64_BIT_VALUE_ENCODING
rshift32(TrustedImm32(Value::IsDoubleTag_Shift), ScratchRegister);
- Assembler::Jump isNoDbl = branch32(Equal, ScratchRegister, TrustedImm32(0));
+ Assembler::Jump isNoDbl = branch32(RelationalCondition::Equal, JITTargetPlatform::ScratchRegister, TrustedImm32(0));
#else
and32(Assembler::TrustedImm32(Value::NotDouble_Mask), Assembler::ScratchRegister);
- Assembler::Jump isNoDbl = branch32(Assembler::Equal, Assembler::ScratchRegister,
- Assembler::TrustedImm32(Value::NotDouble_Mask));
+ Assembler::Jump isNoDbl = branch32(RelationalCondition::Equal, JITTargetPlatform::ScratchRegister, TrustedImm32(Value::NotDouble_Mask));
#endif
toDoubleRegister(src, dest);
intDone.link(this);
@@ -448,10 +452,11 @@ Assembler::Jump Assembler::genTryDoubleConversion(IR::Expr *src, Assembler::FPRe
return isNoDbl;
}
-Assembler::Jump Assembler::branchDouble(bool invertCondition, IR::AluOp op,
+template <typename TargetConfiguration>
+typename Assembler<TargetConfiguration>::Jump Assembler<TargetConfiguration>::branchDouble(bool invertCondition, IR::AluOp op,
IR::Expr *left, IR::Expr *right)
{
- Assembler::DoubleCondition cond;
+ DoubleCondition cond;
switch (op) {
case IR::OpGt: cond = Assembler::DoubleGreaterThan; break;
case IR::OpLt: cond = Assembler::DoubleLessThan; break;
@@ -465,12 +470,13 @@ Assembler::Jump Assembler::branchDouble(bool invertCondition, IR::AluOp op,
Q_UNREACHABLE();
}
if (invertCondition)
- cond = JSC::MacroAssembler::invert(cond);
+ cond = TargetConfiguration::MacroAssembler::invert(cond);
- return JSC::MacroAssembler::branchDouble(cond, toDoubleRegister(left, FPGpr0), toDoubleRegister(right, FPGpr1));
+ return TargetConfiguration::MacroAssembler::branchDouble(cond, toDoubleRegister(left, FPGpr0), toDoubleRegister(right, JITTargetPlatform::FPGpr1));
}
-Assembler::Jump Assembler::branchInt32(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right)
+template <typename TargetConfiguration>
+typename Assembler<TargetConfiguration>::Jump Assembler<TargetConfiguration>::branchInt32(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right)
{
Assembler::RelationalCondition cond;
switch (op) {
@@ -486,18 +492,51 @@ Assembler::Jump Assembler::branchInt32(bool invertCondition, IR::AluOp op, IR::E
Q_UNREACHABLE();
}
if (invertCondition)
- cond = JSC::MacroAssembler::invert(cond);
+ cond = TargetConfiguration::MacroAssembler::invert(cond);
- return JSC::MacroAssembler::branch32(cond,
- toInt32Register(left, Assembler::ScratchRegister),
- toInt32Register(right, Assembler::ReturnValueRegister));
+ return TargetConfiguration::MacroAssembler::branch32(cond,
+ toInt32Register(left, Assembler::ScratchRegister),
+ toInt32Register(right, Assembler::ReturnValueRegister));
}
-void Assembler::setStackLayout(int maxArgCountForBuiltins, int regularRegistersToSave, int fpRegistersToSave)
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::setStackLayout(int maxArgCountForBuiltins, int regularRegistersToSave, int fpRegistersToSave)
{
_stackLayout.reset(new StackLayout(_function, maxArgCountForBuiltins, regularRegistersToSave, fpRegistersToSave));
}
+template <typename TargetConfiguration>
+void Assembler<TargetConfiguration>::returnFromFunction(IR::Ret *s, RegisterInformation regularRegistersToSave, RegisterInformation fpRegistersToSave)
+{
+ if (!s) {
+ // this only happens if the method doesn't have a return statement and can
+ // only exit through an exception
+ } else if (IR::Temp *t = s->expr->asTemp()) {
+ RegisterSizeDependentOps::setFunctionReturnValueFromTemp(this, t);
+ } else if (IR::Const *c = s->expr->asConst()) {
+ QV4::Primitive retVal = convertToValue(c);
+ RegisterSizeDependentOps::setFunctionReturnValueFromConst(this, retVal);
+ } else {
+ Q_UNREACHABLE();
+ Q_UNUSED(s);
+ }
+
+ Label leaveStackFrame = label();
+
+ const int locals = stackLayout().calculateJSStackFrameSize();
+ subPtr(TrustedImm32(sizeof(QV4::Value)*locals), JITTargetPlatform::LocalsRegister);
+ loadPtr(Address(JITTargetPlatform::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), JITTargetPlatform::ScratchRegister);
+ loadPtr(Address(JITTargetPlatform::ScratchRegister, qOffsetOf(ExecutionContext::Data, engine)), JITTargetPlatform::ScratchRegister);
+ storePtr(JITTargetPlatform::LocalsRegister, Address(JITTargetPlatform::ScratchRegister, qOffsetOf(ExecutionEngine, jsStackTop)));
+
+ leaveStandardStackFrame(regularRegistersToSave, fpRegistersToSave);
+ ret();
+
+ exceptionReturnLabel = label();
+ QV4::Primitive retVal = Primitive::undefinedValue();
+ RegisterSizeDependentOps::setFunctionReturnValueFromConst(this, retVal);
+ jump(leaveStackFrame);
+}
namespace {
class QIODevicePrintStream: public FilePrintStream
@@ -563,7 +602,8 @@ static void qt_closePmap()
#endif
-JSC::MacroAssemblerCodeRef Assembler::link(int *codeSize)
+template <typename TargetConfiguration>
+JSC::MacroAssemblerCodeRef Assembler<TargetConfiguration>::link(int *codeSize)
{
Label endOfCode = label();
@@ -577,7 +617,7 @@ JSC::MacroAssemblerCodeRef Assembler::link(int *codeSize)
}
JSC::JSGlobalData dummy(_executableAllocator);
- JSC::LinkBuffer linkBuffer(dummy, this, 0);
+ JSC::LinkBuffer<typename TargetConfiguration::MacroAssembler> linkBuffer(dummy, this, 0);
for (const DataLabelPatch &p : qAsConst(_dataLabelPatches))
linkBuffer.patch(p.dataLabel, linkBuffer.locationOf(p.target));
@@ -668,4 +708,9 @@ JSC::MacroAssemblerCodeRef Assembler::link(int *codeSize)
return codeRef;
}
+template class QV4::JIT::Assembler<DefaultAssemblerTargetConfiguration>;
+#if defined(V4_BOOTSTRAP) && CPU(X86_64)
+template class QV4::JIT::Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerARMv7, NoOperatingSystemSpecialization>>;
+#endif
+
#endif
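
qv4assembler.cpp now defines every Assembler member as a template over the target configuration and ends with explicit instantiations: the default host configuration plus, for an x86-64 bootstrap build, an ARMv7 assembler so cached code can be generated for a target other than the host. A toy of the same mechanics, keeping template member bodies in one translation unit behind explicit instantiations (Widget, HostConfig and CrossConfig are invented):

    template <typename Config>
    class Widget {
    public:
        int run(int x);                      // declared in the header
    };

    // In this pattern the definitions live only in the .cpp ...
    template <typename Config>
    int Widget<Config>::run(int x)
    {
        return Config::scale * x;
    }

    struct HostConfig  { enum { scale = 2 }; };
    struct CrossConfig { enum { scale = 3 }; };

    // ... followed by explicit instantiations, which emit code for exactly these
    // configurations so other translation units can link against them.
    template class Widget<HostConfig>;
    template class Widget<CrossConfig>;
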
diff --git a/src/qml/jit/qv4assembler_p.h b/src/qml/jit/qv4assembler_p.h
index de9c246ed6..6d8d773ff0 100644
--- a/src/qml/jit/qv4assembler_p.h
+++ b/src/qml/jit/qv4assembler_p.h
@@ -55,7 +55,8 @@
#include "private/qv4isel_p.h"
#include "private/qv4isel_util_p.h"
#include "private/qv4value_p.h"
-#include "private/qv4lookup_p.h"
+#include "private/qv4context_p.h"
+#include "private/qv4engine_p.h"
#include "qv4targetplatform_p.h"
#include <config.h>
@@ -73,46 +74,637 @@ QT_BEGIN_NAMESPACE
namespace QV4 {
namespace JIT {
-class InstructionSelection;
-
struct CompilationUnit : public QV4::CompiledData::CompilationUnit
{
virtual ~CompilationUnit();
+#if !defined(V4_BOOTSTRAP)
void linkBackendToEngine(QV4::ExecutionEngine *engine) Q_DECL_OVERRIDE;
+ bool memoryMapCode(QString *errorString) Q_DECL_OVERRIDE;
+#endif
void prepareCodeOffsetsForDiskStorage(CompiledData::Unit *unit) Q_DECL_OVERRIDE;
bool saveCodeToDisk(QIODevice *device, const CompiledData::Unit *unit, QString *errorString) Q_DECL_OVERRIDE;
- bool memoryMapCode(QString *errorString) Q_DECL_OVERRIDE;
// Coderef + execution engine
QVector<JSC::MacroAssemblerCodeRef> codeRefs;
};
-struct LookupCall {
- JSC::MacroAssembler::Address addr;
- uint getterSetterOffset;
+template <typename PlatformAssembler, TargetOperatingSystemSpecialization Specialization>
+struct AssemblerTargetConfiguration
+{
+ typedef JSC::MacroAssembler<PlatformAssembler> MacroAssembler;
+ typedef TargetPlatform<PlatformAssembler, Specialization> Platform;
+ // More things coming here in the future, such as Target OS
+};
+
+#if CPU(ARM_THUMB2)
+typedef JSC::MacroAssemblerARMv7 DefaultPlatformMacroAssembler;
+typedef AssemblerTargetConfiguration<DefaultPlatformMacroAssembler, NoOperatingSystemSpecialization> DefaultAssemblerTargetConfiguration;
+#elif CPU(ARM64)
+typedef JSC::MacroAssemblerARM64 DefaultPlatformMacroAssembler;
+typedef AssemblerTargetConfiguration<DefaultPlatformMacroAssembler, NoOperatingSystemSpecialization> DefaultAssemblerTargetConfiguration;
+#elif CPU(ARM_TRADITIONAL)
+typedef JSC::MacroAssemblerARM DefaultPlatformMacroAssembler;
+typedef AssemblerTargetConfiguration<DefaultPlatformMacroAssembler, NoOperatingSystemSpecialization> DefaultAssemblerTargetConfiguration;
+#elif CPU(MIPS)
+typedef JSC::MacroAssemblerMIPS DefaultPlatformMacroAssembler;
+typedef AssemblerTargetConfiguration<DefaultPlatformMacroAssembler, NoOperatingSystemSpecialization> DefaultAssemblerTargetConfiguration;
+#elif CPU(X86)
+typedef JSC::MacroAssemblerX86 DefaultPlatformMacroAssembler;
+typedef AssemblerTargetConfiguration<DefaultPlatformMacroAssembler, NoOperatingSystemSpecialization> DefaultAssemblerTargetConfiguration;
+#elif CPU(X86_64)
+typedef JSC::MacroAssemblerX86_64 DefaultPlatformMacroAssembler;
+
+#if OS(WINDOWS)
+typedef AssemblerTargetConfiguration<DefaultPlatformMacroAssembler, WindowsSpecialization> DefaultAssemblerTargetConfiguration;
+#else
+typedef AssemblerTargetConfiguration<DefaultPlatformMacroAssembler, NoOperatingSystemSpecialization> DefaultAssemblerTargetConfiguration;
+#endif
- LookupCall(const JSC::MacroAssembler::Address &addr, uint getterSetterOffset)
- : addr(addr)
- , getterSetterOffset(getterSetterOffset)
- {}
+#elif CPU(SH4)
+typedef JSC::MacroAssemblerSH4 DefaultPlatformMacroAssembler;
+typedef AssemblerTargetConfiguration<DefaultPlatformMacroAssembler, NoOperatingSystemSpecialization> DefaultAssemblerTargetConfiguration;
+#endif
+
+#define isel_stringIfyx(s) #s
+#define isel_stringIfy(s) isel_stringIfyx(s)
+
+#define generateRuntimeCall(as, t, function, ...) \
+ as->generateFunctionCallImp(Runtime::Method_##function##_NeedsExceptionCheck, t, "Runtime::" isel_stringIfy(function), typename JITAssembler::RuntimeCall(qOffsetOf(QV4::Runtime, function)), __VA_ARGS__)
+
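
The isel_stringIfy / isel_stringIfyx pair above is the standard two-level stringification trick: the extra indirection makes the preprocessor expand the argument first and stringize the result, so generateRuntimeCall embeds the runtime function's actual name rather than the literal macro parameter. Stand-alone, with invented macro names:

    #include <cstdio>

    #define STRINGIFY_1(s) #s
    #define STRINGIFY(s) STRINGIFY_1(s)      // expands s first, then stringizes

    #define RUNTIME_FUNCTION toInt

    int main()
    {
        std::puts(STRINGIFY(RUNTIME_FUNCTION));   // prints "toInt"
        std::puts(STRINGIFY_1(RUNTIME_FUNCTION)); // prints "RUNTIME_FUNCTION"
        return 0;
    }
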
+
+template <typename JITAssembler, typename MacroAssembler, typename TargetPlatform, int RegisterSize>
+struct RegisterSizeDependentAssembler
+{
};
-struct RuntimeCall {
- JSC::MacroAssembler::Address addr;
+template <typename JITAssembler, typename MacroAssembler, typename TargetPlatform>
+struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatform, 4>
+{
+ using RegisterID = typename JITAssembler::RegisterID;
+ using FPRegisterID = typename JITAssembler::FPRegisterID;
+ using RelationalCondition = typename JITAssembler::RelationalCondition;
+ using Address = typename JITAssembler::Address;
+ using Pointer = typename JITAssembler::Pointer;
+ using TrustedImm32 = typename JITAssembler::TrustedImm32;
+ using TrustedImm64 = typename JITAssembler::TrustedImm64;
+ using Jump = typename JITAssembler::Jump;
+
+ static void loadDouble(JITAssembler *as, Address addr, FPRegisterID dest)
+ {
+ as->MacroAssembler::loadDouble(addr, dest);
+ }
+
+ static void storeDouble(JITAssembler *as, FPRegisterID source, Address addr)
+ {
+ as->MacroAssembler::storeDouble(source, addr);
+ }
+
+ static void storeDouble(JITAssembler *as, FPRegisterID source, IR::Expr* target)
+ {
+ Pointer ptr = as->loadAddress(TargetPlatform::ScratchRegister, target);
+ as->storeDouble(source, ptr);
+ }
+
+ static void storeValue(JITAssembler *as, QV4::Primitive value, Address destination)
+ {
+ as->store32(TrustedImm32(value.int_32()), destination);
+ destination.offset += 4;
+ as->store32(TrustedImm32(value.tag()), destination);
+ }
+
+ template <typename Source, typename Destination>
+ static void copyValueViaRegisters(JITAssembler *as, Source source, Destination destination)
+ {
+ as->loadDouble(source, TargetPlatform::FPGpr0);
+ as->storeDouble(TargetPlatform::FPGpr0, destination);
+ }
+
+ static void loadDoubleConstant(JITAssembler *as, IR::Const *c, FPRegisterID target)
+ {
+ as->MacroAssembler::loadDouble(as->loadConstant(c, TargetPlatform::ScratchRegister), target);
+ }
+
+ static void storeReturnValue(JITAssembler *as, FPRegisterID dest)
+ {
+ as->moveIntsToDouble(TargetPlatform::LowReturnValueRegister, TargetPlatform::HighReturnValueRegister, dest, TargetPlatform::FPGpr0);
+ }
+
+ static void storeReturnValue(JITAssembler *as, const Pointer &dest)
+ {
+ Address destination = dest;
+ as->store32(TargetPlatform::LowReturnValueRegister, destination);
+ destination.offset += 4;
+ as->store32(TargetPlatform::HighReturnValueRegister, destination);
+ }
+
+ static void setFunctionReturnValueFromTemp(JITAssembler *as, IR::Temp *t)
+ {
+ const auto lowReg = TargetPlatform::LowReturnValueRegister;
+ const auto highReg = TargetPlatform::HighReturnValueRegister;
+
+ if (t->kind == IR::Temp::PhysicalRegister) {
+ switch (t->type) {
+ case IR::DoubleType:
+ as->moveDoubleToInts((FPRegisterID) t->index, lowReg, highReg);
+ break;
+ case IR::UInt32Type: {
+ RegisterID srcReg = (RegisterID) t->index;
+ Jump intRange = as->branch32(JITAssembler::GreaterThanOrEqual, srcReg, TrustedImm32(0));
+ as->convertUInt32ToDouble(srcReg, TargetPlatform::FPGpr0, TargetPlatform::ReturnValueRegister);
+ as->moveDoubleToInts(TargetPlatform::FPGpr0, lowReg, highReg);
+ Jump done = as->jump();
+ intRange.link(as);
+ as->move(srcReg, lowReg);
+ as->move(TrustedImm32(QV4::Value::Integer_Type_Internal), highReg);
+ done.link(as);
+ } break;
+ case IR::SInt32Type:
+ as->move((RegisterID) t->index, lowReg);
+ as->move(TrustedImm32(QV4::Value::Integer_Type_Internal), highReg);
+ break;
+ case IR::BoolType:
+ as->move((RegisterID) t->index, lowReg);
+ as->move(TrustedImm32(QV4::Value::Boolean_Type_Internal), highReg);
+ break;
+ default:
+ Q_UNREACHABLE();
+ }
+ } else {
+ Pointer addr = as->loadAddress(TargetPlatform::ScratchRegister, t);
+ as->load32(addr, lowReg);
+ addr.offset += 4;
+ as->load32(addr, highReg);
+ }
+ }
+
+ static void setFunctionReturnValueFromConst(JITAssembler *as, QV4::Primitive retVal)
+ {
+ as->move(TrustedImm32(retVal.int_32()), TargetPlatform::LowReturnValueRegister);
+ as->move(TrustedImm32(retVal.tag()), TargetPlatform::HighReturnValueRegister);
+ }
+
+ static void loadArgumentInRegister(JITAssembler *as, IR::Temp* temp, RegisterID dest, int argumentNumber)
+ {
+ Q_UNUSED(as);
+ Q_UNUSED(temp);
+ Q_UNUSED(dest);
+ Q_UNUSED(argumentNumber);
+ }
+
+ static void loadArgumentInRegister(JITAssembler *as, IR::ArgLocal* al, RegisterID dest, int argumentNumber)
+ {
+ Q_UNUSED(as);
+ Q_UNUSED(al);
+ Q_UNUSED(dest);
+ Q_UNUSED(argumentNumber);
+ }
+
+ static void loadArgumentInRegister(JITAssembler *as, IR::Const* c, RegisterID dest, int argumentNumber)
+ {
+ Q_UNUSED(as);
+ Q_UNUSED(c);
+ Q_UNUSED(dest);
+ Q_UNUSED(argumentNumber);
+ }
+
+ static void loadArgumentInRegister(JITAssembler *as, IR::Expr* expr, RegisterID dest, int argumentNumber)
+ {
+ Q_UNUSED(as);
+ Q_UNUSED(expr);
+ Q_UNUSED(dest);
+ Q_UNUSED(argumentNumber);
+ }
+
+ static void zeroRegister(JITAssembler *as, RegisterID reg)
+ {
+ as->move(TrustedImm32(0), reg);
+ }
+
+ static void zeroStackSlot(JITAssembler *as, int slot)
+ {
+ as->poke(TrustedImm32(0), slot);
+ }
+
+ static void generateCJumpOnUndefined(JITAssembler *as,
+ RelationalCondition cond, IR::Expr *right,
+ RegisterID scratchRegister, RegisterID tagRegister,
+ IR::BasicBlock *nextBlock, IR::BasicBlock *currentBlock,
+ IR::BasicBlock *trueBlock, IR::BasicBlock *falseBlock)
+ {
+ Pointer tagAddr = as->loadAddress(scratchRegister, right);
+ as->load32(tagAddr, tagRegister);
+ Jump j = as->branch32(JITAssembler::invert(cond), tagRegister, TrustedImm32(0));
+ as->addPatch(falseBlock, j);
+
+ tagAddr.offset += 4;
+ as->load32(tagAddr, tagRegister);
+ const TrustedImm32 tag(QV4::Value::Managed_Type_Internal);
+ Q_ASSERT(nextBlock == as->nextBlock());
+ Q_UNUSED(nextBlock);
+ as->generateCJumpOnCompare(cond, tagRegister, tag, currentBlock, trueBlock, falseBlock);
+ }
+
+ static void convertVarToSInt32(JITAssembler *as, IR::Expr *source, IR::Expr *target)
+ {
+ Q_ASSERT(source->type == IR::VarType);
+ // load the tag:
+ Pointer addr = as->loadAddress(TargetPlatform::ScratchRegister, source);
+ Pointer tagAddr = addr;
+ tagAddr.offset += 4;
+ as->load32(tagAddr, TargetPlatform::ReturnValueRegister);
+
+ // check if it's an int32:
+ Jump fallback = as->branch32(RelationalCondition::NotEqual, TargetPlatform::ReturnValueRegister,
+ TrustedImm32(Value::Integer_Type_Internal));
+ IR::Temp *targetTemp = target->asTemp();
+ if (!targetTemp || targetTemp->kind == IR::Temp::StackSlot) {
+ as->load32(addr, TargetPlatform::ReturnValueRegister);
+ Pointer targetAddr = as->loadAddress(TargetPlatform::ScratchRegister, target);
+ as->store32(TargetPlatform::ReturnValueRegister, targetAddr);
+ targetAddr.offset += 4;
+ as->store32(TrustedImm32(Value::Integer_Type_Internal), targetAddr);
+ } else {
+ as->load32(addr, (RegisterID) targetTemp->index);
+ }
+ Jump intDone = as->jump();
+
+ // not an int:
+ fallback.link(as);
+ generateRuntimeCall(as, TargetPlatform::ReturnValueRegister, toInt,
+ as->loadAddress(TargetPlatform::ScratchRegister, source));
+ as->storeInt32(TargetPlatform::ReturnValueRegister, target);
+
+ intDone.link(as);
+ }
+
+ static void loadManagedPointer(JITAssembler *as, RegisterID registerWithPtr, Pointer destAddr)
+ {
+ as->store32(registerWithPtr, destAddr);
+ destAddr.offset += 4;
+ as->store32(TrustedImm32(QV4::Value::Managed_Type_Internal_32), destAddr);
+ }
+
+ static Jump generateIsDoubleCheck(JITAssembler *as, RegisterID tagOrValueRegister)
+ {
+ as->and32(TrustedImm32(Value::NotDouble_Mask), tagOrValueRegister);
+ return as->branch32(RelationalCondition::NotEqual, tagOrValueRegister,
+ TrustedImm32(Value::NotDouble_Mask));
+ }
+};
+
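
The struct above is the 32-bit flavour of RegisterSizeDependentAssembler: a Value is written as two 32-bit halves, payload first and tag at offset 4, while the 64-bit partial specialization that follows NaN-encodes the whole Value into single 64-bit loads and stores. Selecting the flavour by partially specializing on the register size keeps the shared assembler code free of #ifdefs; a toy version of that dispatch, in which every name is a placeholder:

    #include <QtGlobal>
    #include <cstring>

    template <typename Emitter, int RegisterSize>
    struct ValueWriter;                      // primary template left undefined

    template <typename Emitter>
    struct ValueWriter<Emitter, 4> {         // 32-bit targets: two stores
        static void store(Emitter *e, quint64 raw, char *dest)
        {
            e->store32(quint32(raw), dest);           // payload
            e->store32(quint32(raw >> 32), dest + 4); // tag
        }
    };

    template <typename Emitter>
    struct ValueWriter<Emitter, 8> {         // 64-bit targets: one store
        static void store(Emitter *e, quint64 raw, char *dest)
        {
            e->store64(raw, dest);
        }
    };

    // A trivial emitter so the sketch is self-contained.
    struct MemEmitter {
        void store32(quint32 v, char *d) { std::memcpy(d, &v, 4); }
        void store64(quint64 v, char *d) { std::memcpy(d, &v, 8); }
    };

    // Usage: ValueWriter<MemEmitter, sizeof(void *)>::store(&em, raw, buffer);
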
+template <typename JITAssembler, typename MacroAssembler, typename TargetPlatform>
+struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatform, 8>
+{
+ using RegisterID = typename JITAssembler::RegisterID;
+ using FPRegisterID = typename JITAssembler::FPRegisterID;
+ using Address = typename JITAssembler::Address;
+ using TrustedImm32 = typename JITAssembler::TrustedImm32;
+ using TrustedImm64 = typename JITAssembler::TrustedImm64;
+ using Pointer = typename JITAssembler::Pointer;
+ using RelationalCondition = typename JITAssembler::RelationalCondition;
+ using BranchTruncateType = typename JITAssembler::BranchTruncateType;
+ using Jump = typename JITAssembler::Jump;
+
+ static void loadDouble(JITAssembler *as, Address addr, FPRegisterID dest)
+ {
+ as->load64(addr, TargetPlatform::ReturnValueRegister);
+ as->move(TrustedImm64(QV4::Value::NaNEncodeMask), TargetPlatform::ScratchRegister);
+ as->xor64(TargetPlatform::ScratchRegister, TargetPlatform::ReturnValueRegister);
+ as->move64ToDouble(TargetPlatform::ReturnValueRegister, dest);
+ }
+
+ static void storeDouble(JITAssembler *as, FPRegisterID source, Address addr)
+ {
+ as->moveDoubleTo64(source, TargetPlatform::ReturnValueRegister);
+ as->move(TrustedImm64(QV4::Value::NaNEncodeMask), TargetPlatform::ScratchRegister);
+ as->xor64(TargetPlatform::ScratchRegister, TargetPlatform::ReturnValueRegister);
+ as->store64(TargetPlatform::ReturnValueRegister, addr);
+ }
+
+ static void storeDouble(JITAssembler *as, FPRegisterID source, IR::Expr* target)
+ {
+ as->moveDoubleTo64(source, TargetPlatform::ReturnValueRegister);
+ as->move(TrustedImm64(QV4::Value::NaNEncodeMask), TargetPlatform::ScratchRegister);
+ as->xor64(TargetPlatform::ScratchRegister, TargetPlatform::ReturnValueRegister);
+ Pointer ptr = as->loadAddress(TargetPlatform::ScratchRegister, target);
+ as->store64(TargetPlatform::ReturnValueRegister, ptr);
+ }
+
+ static void storeReturnValue(JITAssembler *as, FPRegisterID dest)
+ {
+ as->move(TrustedImm64(QV4::Value::NaNEncodeMask), TargetPlatform::ScratchRegister);
+ as->xor64(TargetPlatform::ScratchRegister, TargetPlatform::ReturnValueRegister);
+ as->move64ToDouble(TargetPlatform::ReturnValueRegister, dest);
+ }
+
+ static void storeReturnValue(JITAssembler *as, const Pointer &dest)
+ {
+ as->store64(TargetPlatform::ReturnValueRegister, dest);
+ }
+
+ static void setFunctionReturnValueFromTemp(JITAssembler *as, IR::Temp *t)
+ {
+ if (t->kind == IR::Temp::PhysicalRegister) {
+ if (t->type == IR::DoubleType) {
+ as->moveDoubleTo64((FPRegisterID) t->index,
+ TargetPlatform::ReturnValueRegister);
+ as->move(TrustedImm64(QV4::Value::NaNEncodeMask),
+ TargetPlatform::ScratchRegister);
+ as->xor64(TargetPlatform::ScratchRegister, TargetPlatform::ReturnValueRegister);
+ } else if (t->type == IR::UInt32Type) {
+ RegisterID srcReg = (RegisterID) t->index;
+ Jump intRange = as->branch32(RelationalCondition::GreaterThanOrEqual, srcReg, TrustedImm32(0));
+ as->convertUInt32ToDouble(srcReg, TargetPlatform::FPGpr0, TargetPlatform::ReturnValueRegister);
+ as->moveDoubleTo64(TargetPlatform::FPGpr0, TargetPlatform::ReturnValueRegister);
+ as->move(TrustedImm64(QV4::Value::NaNEncodeMask), TargetPlatform::ScratchRegister);
+ as->xor64(TargetPlatform::ScratchRegister, TargetPlatform::ReturnValueRegister);
+ Jump done = as->jump();
+ intRange.link(as);
+ as->zeroExtend32ToPtr(srcReg, TargetPlatform::ReturnValueRegister);
+ quint64 tag = QV4::Value::Integer_Type_Internal;
+ as->or64(TrustedImm64(tag << 32),
+ TargetPlatform::ReturnValueRegister);
+ done.link(as);
+ } else {
+ as->zeroExtend32ToPtr((RegisterID) t->index, TargetPlatform::ReturnValueRegister);
+ quint64 tag;
+ switch (t->type) {
+ case IR::SInt32Type:
+ tag = QV4::Value::Integer_Type_Internal;
+ break;
+ case IR::BoolType:
+ tag = QV4::Value::Boolean_Type_Internal;
+ break;
+ default:
+ tag = 31337; // bogus value
+ Q_UNREACHABLE();
+ }
+ as->or64(TrustedImm64(tag << 32),
+ TargetPlatform::ReturnValueRegister);
+ }
+ } else {
+ as->copyValue(TargetPlatform::ReturnValueRegister, t);
+ }
+ }
+
+ static void setFunctionReturnValueFromConst(JITAssembler *as, QV4::Primitive retVal)
+ {
+ as->move(TrustedImm64(retVal.rawValue()), TargetPlatform::ReturnValueRegister);
+ }
+
+ static void storeValue(JITAssembler *as, QV4::Primitive value, Address destination)
+ {
+ as->store64(TrustedImm64(value.rawValue()), destination);
+ }
+
+ template <typename Source, typename Destination>
+ static void copyValueViaRegisters(JITAssembler *as, Source source, Destination destination)
+ {
+ // Use ReturnValueRegister as "scratch" register because loadArgument
+ // and storeArgument are functions that may need a scratch register themselves.
+ as->loadArgumentInRegister(source, TargetPlatform::ReturnValueRegister, 0);
+ as->storeReturnValue(destination);
+ }
+
+ static void loadDoubleConstant(JITAssembler *as, IR::Const *c, FPRegisterID target)
+ {
+ Q_STATIC_ASSERT(sizeof(int64_t) == sizeof(double));
+ int64_t i;
+ memcpy(&i, &c->value, sizeof(double));
+ as->move(TrustedImm64(i), TargetPlatform::ReturnValueRegister);
+ as->move64ToDouble(TargetPlatform::ReturnValueRegister, target);
+ }
+
+ static void loadArgumentInRegister(JITAssembler *as, IR::Temp* temp, RegisterID dest, int argumentNumber)
+ {
+ Q_UNUSED(argumentNumber);
+
+ if (temp) {
+ Pointer addr = as->loadTempAddress(temp);
+ as->load64(addr, dest);
+ } else {
+ QV4::Value undefined = QV4::Primitive::undefinedValue();
+ as->move(TrustedImm64(undefined.rawValue()), dest);
+ }
+ }
+
+ static void loadArgumentInRegister(JITAssembler *as, IR::ArgLocal* al, RegisterID dest, int argumentNumber)
+ {
+ Q_UNUSED(argumentNumber);
+
+ if (al) {
+ Pointer addr = as->loadArgLocalAddress(dest, al);
+ as->load64(addr, dest);
+ } else {
+ QV4::Value undefined = QV4::Primitive::undefinedValue();
+ as->move(TrustedImm64(undefined.rawValue()), dest);
+ }
+ }
+
+ static void loadArgumentInRegister(JITAssembler *as, IR::Const* c, RegisterID dest, int argumentNumber)
+ {
+ Q_UNUSED(argumentNumber);
+
+ QV4::Value v = convertToValue(c);
+ as->move(TrustedImm64(v.rawValue()), dest);
+ }
+
+ static void loadArgumentInRegister(JITAssembler *as, IR::Expr* expr, RegisterID dest, int argumentNumber)
+ {
+ Q_UNUSED(argumentNumber);
+
+ if (!expr) {
+ QV4::Value undefined = QV4::Primitive::undefinedValue();
+ as->move(TrustedImm64(undefined.rawValue()), dest);
+ } else if (IR::Temp *t = expr->asTemp()){
+ loadArgumentInRegister(as, t, dest, argumentNumber);
+ } else if (IR::ArgLocal *al = expr->asArgLocal()) {
+ loadArgumentInRegister(as, al, dest, argumentNumber);
+ } else if (IR::Const *c = expr->asConst()) {
+ loadArgumentInRegister(as, c, dest, argumentNumber);
+ } else {
+ Q_ASSERT(!"unimplemented expression type in loadArgument");
+ }
+ }
+
+ static void zeroRegister(JITAssembler *as, RegisterID reg)
+ {
+ as->move(TrustedImm64(0), reg);
+ }
- inline RuntimeCall(uint offset = uint(INT_MIN));
- bool isValid() const { return addr.offset >= 0; }
+ static void zeroStackSlot(JITAssembler *as, int slot)
+ {
+ as->store64(TrustedImm64(0), as->addressForPoke(slot));
+ }
+
+ static void generateCJumpOnCompare(JITAssembler *as,
+ RelationalCondition cond,
+ RegisterID left,
+ TrustedImm64 right,
+ IR::BasicBlock *nextBlock,
+ IR::BasicBlock *currentBlock,
+ IR::BasicBlock *trueBlock,
+ IR::BasicBlock *falseBlock)
+ {
+ if (trueBlock == nextBlock) {
+ Jump target = as->branch64(as->invert(cond), left, right);
+ as->addPatch(falseBlock, target);
+ } else {
+ Jump target = as->branch64(cond, left, right);
+ as->addPatch(trueBlock, target);
+ as->jumpToBlock(currentBlock, falseBlock);
+ }
+ }
+
+ static void generateCJumpOnUndefined(JITAssembler *as,
+ RelationalCondition cond, IR::Expr *right,
+ RegisterID scratchRegister, RegisterID tagRegister,
+ IR::BasicBlock *nextBlock, IR::BasicBlock *currentBlock,
+ IR::BasicBlock *trueBlock, IR::BasicBlock *falseBlock)
+ {
+ Pointer addr = as->loadAddress(scratchRegister, right);
+ as->load64(addr, tagRegister);
+ const TrustedImm64 tag(0);
+ generateCJumpOnCompare(as, cond, tagRegister, tag, nextBlock, currentBlock, trueBlock, falseBlock);
+ }
+
+ static void convertVarToSInt32(JITAssembler *as, IR::Expr *source, IR::Expr *target)
+ {
+ Q_ASSERT(source->type == IR::VarType);
+ Pointer addr = as->loadAddress(TargetPlatform::ScratchRegister, source);
+ as->load64(addr, TargetPlatform::ScratchRegister);
+ as->move(TargetPlatform::ScratchRegister, TargetPlatform::ReturnValueRegister);
+
+ // check if it's integer convertible
+ as->urshift64(TrustedImm32(QV4::Value::IsIntegerConvertible_Shift), TargetPlatform::ScratchRegister);
+ Jump isIntConvertible = as->branch32(RelationalCondition::Equal, TargetPlatform::ScratchRegister, TrustedImm32(3));
+
+ // nope, not integer convertible, so check for a double:
+ as->urshift64(TrustedImm32(
+ QV4::Value::IsDoubleTag_Shift - QV4::Value::IsIntegerConvertible_Shift),
+ TargetPlatform::ScratchRegister);
+ Jump fallback = as->branch32(RelationalCondition::GreaterThan, TargetPlatform::ScratchRegister, TrustedImm32(0));
+
+ // it's a double
+ as->move(TrustedImm64(QV4::Value::NaNEncodeMask), TargetPlatform::ScratchRegister);
+ as->xor64(TargetPlatform::ScratchRegister, TargetPlatform::ReturnValueRegister);
+ as->move64ToDouble(TargetPlatform::ReturnValueRegister, TargetPlatform::FPGpr0);
+ Jump success =
+ as->branchTruncateDoubleToInt32(TargetPlatform::FPGpr0, TargetPlatform::ReturnValueRegister,
+ BranchTruncateType::BranchIfTruncateSuccessful);
+
+ // not an int:
+ fallback.link(as);
+ generateRuntimeCall(as, TargetPlatform::ReturnValueRegister, toInt,
+ as->loadAddress(TargetPlatform::ScratchRegister, source));
+
+ isIntConvertible.link(as);
+ success.link(as);
+ IR::Temp *targetTemp = target->asTemp();
+ if (!targetTemp || targetTemp->kind == IR::Temp::StackSlot) {
+ Pointer targetAddr = as->loadAddress(TargetPlatform::ScratchRegister, target);
+ as->store32(TargetPlatform::ReturnValueRegister, targetAddr);
+ targetAddr.offset += 4;
+ as->store32(TrustedImm32(Value::Integer_Type_Internal), targetAddr);
+ } else {
+ as->storeInt32(TargetPlatform::ReturnValueRegister, target);
+ }
+ }
+
+ static void loadManagedPointer(JITAssembler *as, RegisterID registerWithPtr, Pointer destAddr)
+ {
+ as->store64(registerWithPtr, destAddr);
+ }
+
+ static Jump generateIsDoubleCheck(JITAssembler *as, RegisterID tagOrValueRegister)
+ {
+ as->rshift32(TrustedImm32(Value::IsDoubleTag_Shift), tagOrValueRegister);
+ return as->branch32(RelationalCondition::NotEqual, tagOrValueRegister,
+ TrustedImm32(0));
+ }
};
-class Assembler : public JSC::MacroAssembler, public TargetPlatform
+template <typename TargetConfiguration>
+class Assembler : public TargetConfiguration::MacroAssembler, public TargetConfiguration::Platform
{
Q_DISABLE_COPY(Assembler)
public:
Assembler(QV4::Compiler::JSUnitGenerator *jsGenerator, IR::Function* function, QV4::ExecutableAllocator *executableAllocator);
+ using MacroAssembler = typename TargetConfiguration::MacroAssembler;
+ using RegisterID = typename MacroAssembler::RegisterID;
+ using FPRegisterID = typename MacroAssembler::FPRegisterID;
+ using Address = typename MacroAssembler::Address;
+ using Label = typename MacroAssembler::Label;
+ using Jump = typename MacroAssembler::Jump;
+ using DataLabelPtr = typename MacroAssembler::DataLabelPtr;
+ using TrustedImm32 = typename MacroAssembler::TrustedImm32;
+ using TrustedImm64 = typename MacroAssembler::TrustedImm64;
+ using TrustedImmPtr = typename MacroAssembler::TrustedImmPtr;
+ using RelationalCondition = typename MacroAssembler::RelationalCondition;
+ using typename MacroAssembler::DoubleCondition;
+ using MacroAssembler::label;
+ using MacroAssembler::move;
+ using MacroAssembler::jump;
+ using MacroAssembler::add32;
+ using MacroAssembler::and32;
+ using MacroAssembler::store32;
+ using MacroAssembler::loadPtr;
+ using MacroAssembler::load32;
+ using MacroAssembler::branch32;
+ using MacroAssembler::subDouble;
+ using MacroAssembler::subPtr;
+ using MacroAssembler::addPtr;
+ using MacroAssembler::call;
+ using MacroAssembler::poke;
+ using MacroAssembler::branchTruncateDoubleToUint32;
+ using MacroAssembler::or32;
+ using MacroAssembler::moveDouble;
+ using MacroAssembler::convertUInt32ToDouble;
+ using MacroAssembler::invert;
+ using MacroAssembler::convertInt32ToDouble;
+ using MacroAssembler::rshift32;
+ using MacroAssembler::storePtr;
+ using MacroAssembler::ret;
+
+ using JITTargetPlatform = typename TargetConfiguration::Platform;
+ using JITTargetPlatform::RegisterArgumentCount;
+ using JITTargetPlatform::StackSpaceAllocatedUponFunctionEntry;
+ using JITTargetPlatform::RegisterSize;
+ using JITTargetPlatform::StackAlignment;
+ using JITTargetPlatform::ReturnValueRegister;
+ using JITTargetPlatform::StackPointerRegister;
+ using JITTargetPlatform::ScratchRegister;
+ using JITTargetPlatform::EngineRegister;
+ using JITTargetPlatform::StackShadowSpace;
+ using JITTargetPlatform::registerForArgument;
+ using JITTargetPlatform::FPGpr0;
+ using JITTargetPlatform::platformEnterStandardStackFrame;
+ using JITTargetPlatform::platformLeaveStandardStackFrame;
+
+ using RegisterSizeDependentOps = RegisterSizeDependentAssembler<Assembler<TargetConfiguration>, MacroAssembler, JITTargetPlatform, RegisterSize>;
+
+ struct LookupCall {
+ Address addr;
+ uint getterSetterOffset;
+
+ LookupCall(const Address &addr, uint getterSetterOffset)
+ : addr(addr)
+ , getterSetterOffset(getterSetterOffset)
+ {}
+ };
+
+ struct RuntimeCall {
+ Address addr;
+
+ inline RuntimeCall(uint offset = uint(INT_MIN));
+ bool isValid() const { return addr.offset >= 0; }
+ };
+
// Explicit type to allow distinguishing between
// pushing an address itself or the value it points
// to onto the stack when calling functions.
@@ -319,20 +911,29 @@ public:
void addPatch(DataLabelPtr patch, IR::BasicBlock *target);
void generateCJumpOnNonZero(RegisterID reg, IR::BasicBlock *currentBlock,
IR::BasicBlock *trueBlock, IR::BasicBlock *falseBlock);
-#ifdef QV4_USE_64_BIT_VALUE_ENCODING
- void generateCJumpOnCompare(RelationalCondition cond, RegisterID left, TrustedImm64 right,
- IR::BasicBlock *currentBlock, IR::BasicBlock *trueBlock,
- IR::BasicBlock *falseBlock);
-#endif
void generateCJumpOnCompare(RelationalCondition cond, RegisterID left, TrustedImm32 right,
IR::BasicBlock *currentBlock, IR::BasicBlock *trueBlock,
IR::BasicBlock *falseBlock);
void generateCJumpOnCompare(RelationalCondition cond, RegisterID left, RegisterID right,
IR::BasicBlock *currentBlock, IR::BasicBlock *trueBlock,
IR::BasicBlock *falseBlock);
- Jump genTryDoubleConversion(IR::Expr *src, Assembler::FPRegisterID dest);
- Assembler::Jump branchDouble(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right);
- Assembler::Jump branchInt32(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right);
+ void generateCJumpOnUndefined(RelationalCondition cond, IR::Expr *right,
+ RegisterID scratchRegister, RegisterID tagRegister,
+ IR::BasicBlock *currentBlock, IR::BasicBlock *trueBlock,
+ IR::BasicBlock *falseBlock)
+ {
+ RegisterSizeDependentOps::generateCJumpOnUndefined(this, cond, right, scratchRegister, tagRegister,
+ _nextBlock, currentBlock, trueBlock, falseBlock);
+ }
+
+ Jump generateIsDoubleCheck(RegisterID tagOrValueRegister)
+ {
+ return RegisterSizeDependentOps::generateIsDoubleCheck(this, tagOrValueRegister);
+ }
+
+ Jump genTryDoubleConversion(IR::Expr *src, FPRegisterID dest);
+ Jump branchDouble(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right);
+ Jump branchInt32(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right);
Pointer loadAddress(RegisterID tmp, IR::Expr *t);
Pointer loadTempAddress(IR::Temp *t);
@@ -396,7 +997,7 @@ public:
void loadArgumentInRegister(PointerToValue temp, RegisterID dest, int argumentNumber)
{
if (!temp.value) {
- move(TrustedImmPtr(0), dest);
+ RegisterSizeDependentOps::zeroRegister(this, dest);
} else {
Pointer addr = toAddress(dest, temp.value, argumentNumber);
loadArgumentInRegister(addr, dest, argumentNumber);
@@ -415,70 +1016,31 @@ public:
loadArgumentInRegister(addr, dest, argumentNumber);
}
-#ifdef VALUE_FITS_IN_REGISTER
void loadArgumentInRegister(IR::Temp* temp, RegisterID dest, int argumentNumber)
{
- Q_UNUSED(argumentNumber);
-
- if (temp) {
- Pointer addr = loadTempAddress(temp);
- load64(addr, dest);
- } else {
- QV4::Value undefined = QV4::Primitive::undefinedValue();
- move(TrustedImm64(undefined.rawValue()), dest);
- }
+ RegisterSizeDependentOps::loadArgumentInRegister(this, temp, dest, argumentNumber);
}
void loadArgumentInRegister(IR::ArgLocal* al, RegisterID dest, int argumentNumber)
{
- Q_UNUSED(argumentNumber);
-
- if (al) {
- Pointer addr = loadArgLocalAddress(dest, al);
- load64(addr, dest);
- } else {
- QV4::Value undefined = QV4::Primitive::undefinedValue();
- move(TrustedImm64(undefined.rawValue()), dest);
- }
+ RegisterSizeDependentOps::loadArgumentInRegister(this, al, dest, argumentNumber);
}
void loadArgumentInRegister(IR::Const* c, RegisterID dest, int argumentNumber)
{
- Q_UNUSED(argumentNumber);
-
- QV4::Value v = convertToValue(c);
- move(TrustedImm64(v.rawValue()), dest);
+ RegisterSizeDependentOps::loadArgumentInRegister(this, c, dest, argumentNumber);
}
void loadArgumentInRegister(IR::Expr* expr, RegisterID dest, int argumentNumber)
{
- Q_UNUSED(argumentNumber);
-
- if (!expr) {
- QV4::Value undefined = QV4::Primitive::undefinedValue();
- move(TrustedImm64(undefined.rawValue()), dest);
- } else if (IR::Temp *t = expr->asTemp()){
- loadArgumentInRegister(t, dest, argumentNumber);
- } else if (IR::ArgLocal *al = expr->asArgLocal()) {
- loadArgumentInRegister(al, dest, argumentNumber);
- } else if (IR::Const *c = expr->asConst()) {
- loadArgumentInRegister(c, dest, argumentNumber);
- } else {
- Q_ASSERT(!"unimplemented expression type in loadArgument");
- }
- }
-#else
- void loadArgumentInRegister(IR::Expr*, RegisterID)
- {
- Q_ASSERT(!"unimplemented: expression in loadArgument");
+ RegisterSizeDependentOps::loadArgumentInRegister(this, expr, dest, argumentNumber);
}
-#endif
void loadArgumentInRegister(TrustedImm32 imm32, RegisterID dest, int argumentNumber)
{
Q_UNUSED(argumentNumber);
- xorPtr(dest, dest);
+ RegisterSizeDependentOps::zeroRegister(this, dest);
if (imm32.m_value)
move(imm32, dest);
}
@@ -499,55 +1061,13 @@ public:
void storeReturnValue(FPRegisterID dest)
{
-#ifdef VALUE_FITS_IN_REGISTER
- move(TrustedImm64(QV4::Value::NaNEncodeMask), ScratchRegister);
- xor64(ScratchRegister, ReturnValueRegister);
- move64ToDouble(ReturnValueRegister, dest);
-#elif defined(Q_PROCESSOR_ARM)
- moveIntsToDouble(JSC::ARMRegisters::r0, JSC::ARMRegisters::r1, dest, FPGpr0);
-#elif defined(Q_PROCESSOR_X86)
- moveIntsToDouble(JSC::X86Registers::eax, JSC::X86Registers::edx, dest, FPGpr0);
-#elif defined(Q_PROCESSOR_MIPS)
- moveIntsToDouble(JSC::MIPSRegisters::v0, JSC::MIPSRegisters::v1, dest, FPGpr0);
-#else
- subPtr(TrustedImm32(sizeof(QV4::Value)), StackPointerRegister);
- Pointer tmp(StackPointerRegister, 0);
- storeReturnValue(tmp);
- loadDouble(tmp, dest);
- addPtr(TrustedImm32(sizeof(QV4::Value)), StackPointerRegister);
-#endif
+ RegisterSizeDependentOps::storeReturnValue(this, dest);
}
-#ifdef VALUE_FITS_IN_REGISTER
- void storeReturnValue(const Pointer &dest)
- {
- store64(ReturnValueRegister, dest);
- }
-#elif defined(Q_PROCESSOR_X86)
- void storeReturnValue(const Pointer &dest)
- {
- Pointer destination = dest;
- store32(JSC::X86Registers::eax, destination);
- destination.offset += 4;
- store32(JSC::X86Registers::edx, destination);
- }
-#elif defined(Q_PROCESSOR_ARM)
- void storeReturnValue(const Pointer &dest)
- {
- Pointer destination = dest;
- store32(JSC::ARMRegisters::r0, destination);
- destination.offset += 4;
- store32(JSC::ARMRegisters::r1, destination);
- }
-#elif defined(Q_PROCESSOR_MIPS)
void storeReturnValue(const Pointer &dest)
{
- Pointer destination = dest;
- store32(JSC::MIPSRegisters::v0, destination);
- destination.offset += 4;
- store32(JSC::MIPSRegisters::v1, destination);
+ RegisterSizeDependentOps::storeReturnValue(this, dest);
}
-#endif
void storeReturnValue(IR::Expr *target)
{
@@ -609,7 +1129,7 @@ public:
Pointer ptr = toAddress(ScratchRegister, temp.value, argumentNumber);
loadArgumentOnStack<StackSlot>(ptr, argumentNumber);
} else {
- poke(TrustedImmPtr(0), StackSlot);
+ RegisterSizeDependentOps::zeroStackSlot(this, StackSlot);
}
}
@@ -648,38 +1168,18 @@ public:
moveDouble(source, (FPRegisterID) targetTemp->index);
return;
}
-#ifdef QV4_USE_64_BIT_VALUE_ENCODING
- moveDoubleTo64(source, ReturnValueRegister);
- move(TrustedImm64(QV4::Value::NaNEncodeMask), ScratchRegister);
- xor64(ScratchRegister, ReturnValueRegister);
- Pointer ptr = loadAddress(ScratchRegister, target);
- store64(ReturnValueRegister, ptr);
-#else
- Pointer ptr = loadAddress(ScratchRegister, target);
- storeDouble(source, ptr);
-#endif
+ RegisterSizeDependentOps::storeDouble(this, source, target);
}
-#ifdef QV4_USE_64_BIT_VALUE_ENCODING
- // We need to (de)mangle the double
+
void loadDouble(Address addr, FPRegisterID dest)
{
- load64(addr, ReturnValueRegister);
- move(TrustedImm64(QV4::Value::NaNEncodeMask), ScratchRegister);
- xor64(ScratchRegister, ReturnValueRegister);
- move64ToDouble(ReturnValueRegister, dest);
+ RegisterSizeDependentOps::loadDouble(this, addr, dest);
}
void storeDouble(FPRegisterID source, Address addr)
{
- moveDoubleTo64(source, ReturnValueRegister);
- move(TrustedImm64(QV4::Value::NaNEncodeMask), ScratchRegister);
- xor64(ScratchRegister, ReturnValueRegister);
- store64(ReturnValueRegister, addr);
+ RegisterSizeDependentOps::storeDouble(this, source, addr);
}
-#else
- using JSC::MacroAssembler::loadDouble;
- using JSC::MacroAssembler::storeDouble;
-#endif
template <typename Result, typename Source>
void copyValue(Result result, Source source);
@@ -691,8 +1191,15 @@ public:
{
Q_ASSERT(!source->asTemp() || source->asTemp()->kind != IR::Temp::PhysicalRegister);
Q_ASSERT(target.base != scratchRegister);
- JSC::MacroAssembler::loadDouble(loadAddress(scratchRegister, source), FPGpr0);
- JSC::MacroAssembler::storeDouble(FPGpr0, target);
+ TargetConfiguration::MacroAssembler::loadDouble(loadAddress(scratchRegister, source), FPGpr0);
+ TargetConfiguration::MacroAssembler::storeDouble(FPGpr0, target);
+ }
+
+ // The scratch register is used to calculate the temp address for the target.
+ void memcopyValue(IR::Expr *target, Pointer source, FPRegisterID fpScratchRegister, RegisterID scratchRegister)
+ {
+ TargetConfiguration::MacroAssembler::loadDouble(source, fpScratchRegister);
+ TargetConfiguration::MacroAssembler::storeDouble(fpScratchRegister, loadAddress(scratchRegister, target));
}
void storeValue(QV4::Primitive value, RegisterID destination)
@@ -704,13 +1211,7 @@ public:
void storeValue(QV4::Primitive value, Address destination)
{
-#ifdef VALUE_FITS_IN_REGISTER
- store64(TrustedImm64(value.rawValue()), destination);
-#else
- store32(TrustedImm32(value.int_32()), destination);
- destination.offset += 4;
- store32(TrustedImm32(value.tag()), destination);
-#endif
+ RegisterSizeDependentOps::storeValue(this, value, destination);
}
void storeValue(QV4::Primitive value, IR::Expr* temp);
@@ -722,7 +1223,7 @@ public:
void checkException() {
load32(Address(EngineRegister, qOffsetOf(QV4::ExecutionEngine, hasException)), ScratchRegister);
- Jump exceptionThrown = branch32(NotEqual, ScratchRegister, TrustedImm32(0));
+ Jump exceptionThrown = branch32(RelationalCondition::NotEqual, ScratchRegister, TrustedImm32(0));
if (catchBlock)
addPatch(catchBlock, exceptionThrown);
else
@@ -781,6 +1282,27 @@ public:
enum { Size = 0 };
};
+ template <typename T> bool prepareCall(T &)
+ { return true; }
+
+ bool prepareCall(LookupCall &lookupCall)
+ {
+ // IMPORTANT! See generateLookupCall in qv4isel_masm_p.h for details!
+
+ // load the table from the context
+ loadPtr(Address(EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), ScratchRegister);
+ loadPtr(Address(ScratchRegister, qOffsetOf(QV4::Heap::ExecutionContext, lookups)),
+ lookupCall.addr.base);
+ // pre-calculate the indirect address for the lookupCall table:
+ if (lookupCall.addr.offset)
+ addPtr(TrustedImm32(lookupCall.addr.offset), lookupCall.addr.base);
+ // store it as the first argument
+ loadArgumentOnStackOrRegister<0>(lookupCall.addr.base);
+ // set the destination address's offset to the getterSetterOffset. The base is the lookupCall table's address
+ lookupCall.addr.offset = lookupCall.getterSetterOffset;
+ return false;
+ }
+
template <typename ArgRet, typename Callable, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6>
void generateFunctionCallImp(bool needsExceptionCheck, ArgRet r, const char* functionName, Callable function, Arg1 arg1, Arg2 arg2, Arg3 arg3, Arg4 arg4, Arg5 arg5, Arg6 arg6)
{
@@ -813,11 +1335,11 @@ public:
loadArgumentOnStackOrRegister<2>(arg3);
loadArgumentOnStackOrRegister<1>(arg2);
- if (prepareCall(function, this))
+ if (prepareCall(function))
loadArgumentOnStackOrRegister<0>(arg1);
#ifdef RESTORE_EBX_ON_CALL
- load32(ebxAddressOnStack(), JSC::X86Registers::ebx); // restore the GOT ptr
+ load32(this->ebxAddressOnStack(), JSC::X86Registers::ebx); // restore the GOT ptr
#endif
callAbsolute(functionName, function);
@@ -957,7 +1479,7 @@ public:
void storeUInt32(RegisterID reg, Pointer addr)
{
// The UInt32 representation in QV4::Value is really convoluted. See also toUInt32Register.
- Jump intRange = branch32(GreaterThanOrEqual, reg, TrustedImm32(0));
+ Jump intRange = branch32(RelationalCondition::GreaterThanOrEqual, reg, TrustedImm32(0));
convertUInt32ToDouble(reg, FPGpr0, ReturnValueRegister);
storeDouble(FPGpr0, addr);
Jump done = jump();
@@ -980,15 +1502,7 @@ public:
FPRegisterID toDoubleRegister(IR::Expr *e, FPRegisterID target = FPGpr0)
{
if (IR::Const *c = e->asConst()) {
-#ifdef QV4_USE_64_BIT_VALUE_ENCODING
- Q_STATIC_ASSERT(sizeof(int64_t) == sizeof(double));
- int64_t i;
- memcpy(&i, &c->value, sizeof(double));
- move(TrustedImm64(i), ReturnValueRegister);
- move64ToDouble(ReturnValueRegister, target);
-#else
- JSC::MacroAssembler::loadDouble(loadConstant(c, ScratchRegister), target);
-#endif
+ RegisterSizeDependentOps::loadDoubleConstant(this, c, target);
return target;
}
@@ -1047,7 +1561,7 @@ public:
Pointer tagAddr = addr;
tagAddr.offset += 4;
load32(tagAddr, scratchReg);
- Jump inIntRange = branch32(Equal, scratchReg, TrustedImm32(QV4::Value::Integer_Type_Internal));
+ Jump inIntRange = branch32(RelationalCondition::Equal, scratchReg, TrustedImm32(QV4::Value::Integer_Type_Internal));
// it's not in signed int range, so load it as a double, and truncate it down
loadDouble(addr, FPGpr0);
@@ -1065,6 +1579,8 @@ public:
return scratchReg;
}
+ void returnFromFunction(IR::Ret *s, RegisterInformation regularRegistersToSave, RegisterInformation fpRegistersToSave);
+
JSC::MacroAssemblerCodeRef link(int *codeSize);
void setStackLayout(int maxArgCountForBuiltins, int regularRegistersToSave, int fpRegistersToSave);
@@ -1095,22 +1611,16 @@ private:
QV4::Compiler::JSUnitGenerator *_jsGenerator;
};
+template <typename TargetConfiguration>
template <typename Result, typename Source>
-void Assembler::copyValue(Result result, Source source)
+void Assembler<TargetConfiguration>::copyValue(Result result, Source source)
{
-#ifdef VALUE_FITS_IN_REGISTER
- // Use ReturnValueRegister as "scratch" register because loadArgument
- // and storeArgument are functions that may need a scratch register themselves.
- loadArgumentInRegister(source, ReturnValueRegister, 0);
- storeReturnValue(result);
-#else
- loadDouble(source, FPGpr0);
- storeDouble(FPGpr0, result);
-#endif
+ RegisterSizeDependentOps::copyValueViaRegisters(this, source, result);
}
+template <typename TargetConfiguration>
template <typename Result>
-void Assembler::copyValue(Result result, IR::Expr* source)
+void Assembler<TargetConfiguration>::copyValue(Result result, IR::Expr* source)
{
if (source->type == IR::BoolType) {
RegisterID reg = toInt32Register(source, ScratchRegister);
@@ -1124,15 +1634,7 @@ void Assembler::copyValue(Result result, IR::Expr* source)
} else if (source->type == IR::DoubleType) {
storeDouble(toDoubleRegister(source), result);
} else if (source->asTemp() || source->asArgLocal()) {
-#ifdef VALUE_FITS_IN_REGISTER
- // Use ReturnValueRegister as "scratch" register because loadArgument
- // and storeArgument are functions that may need a scratch register themselves.
- loadArgumentInRegister(source, ReturnValueRegister, 0);
- storeReturnValue(result);
-#else
- loadDouble(source, FPGpr0);
- storeDouble(FPGpr0, result);
-#endif
+ RegisterSizeDependentOps::copyValueViaRegisters(this, source, result);
} else if (IR::Const *c = source->asConst()) {
QV4::Primitive v = convertToValue(c);
storeValue(v, result);
@@ -1141,34 +1643,12 @@ void Assembler::copyValue(Result result, IR::Expr* source)
}
}
-inline RuntimeCall::RuntimeCall(uint offset)
+template <typename TargetConfiguration>
+inline Assembler<TargetConfiguration>::RuntimeCall::RuntimeCall(uint offset)
: addr(Assembler::EngineRegister, offset + qOffsetOf(QV4::ExecutionEngine, runtime))
{
}
-
-
-template <typename T> inline bool prepareCall(T &, Assembler *)
-{ return true; }
-
-template <> inline bool prepareCall(LookupCall &lookupCall, Assembler *as)
-{
- // IMPORTANT! See generateLookupCall in qv4isel_masm_p.h for details!
-
- // load the table from the context
- as->loadPtr(Assembler::Address(Assembler::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), Assembler::ScratchRegister);
- as->loadPtr(Assembler::Address(Assembler::ScratchRegister, qOffsetOf(QV4::Heap::ExecutionContext, lookups)),
- lookupCall.addr.base);
- // pre-calculate the indirect address for the lookupCall table:
- if (lookupCall.addr.offset)
- as->addPtr(Assembler::TrustedImm32(lookupCall.addr.offset), lookupCall.addr.base);
- // store it as the first argument
- as->loadArgumentOnStackOrRegister<0>(lookupCall.addr.base);
- // set the destination addresses offset to the getterSetterOffset. The base is the lookupCall table's address
- lookupCall.addr.offset = lookupCall.getterSetterOffset;
- return false;
-}
-
} // end of namespace JIT
} // end of namespace QV4
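
The 8-byte (64-bit register) specialization of RegisterSizeDependentAssembler above emits NaN-boxing sequences: doubles are XORed with QV4::Value::NaNEncodeMask before being stored, and 32-bit payloads get their type tag OR-ed into the upper word. The sketch below restates that encoding as ordinary C++ so the emitted instruction sequences are easier to follow; it is illustrative only, and the mask/tag constants are placeholders rather than the real values from qv4value_p.h.

// Illustrative sketch of the 64-bit value boxing that loadDouble/storeDouble
// and setFunctionReturnValueFromTemp generate machine code for.
// The constants are ASSUMED placeholders; the real ones live in QV4::Value.
#include <cstdint>
#include <cstring>

constexpr uint64_t kNaNEncodeMask = 0xFFFF800000000000ull; // placeholder value
constexpr uint64_t kIntegerTag    = 0x3;                   // placeholder value

// storeDouble: XOR the raw IEEE-754 bits with the mask so boxed doubles never
// alias the tagged integer/pointer bit patterns.
inline uint64_t boxDouble(double d)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return bits ^ kNaNEncodeMask;
}

// loadDouble: XOR with the same mask again to recover the original bits.
inline double unboxDouble(uint64_t boxed)
{
    uint64_t bits = boxed ^ kNaNEncodeMask;
    double d;
    std::memcpy(&d, &bits, sizeof d);
    return d;
}

// setFunctionReturnValueFromTemp (SInt32/Bool case): zero-extend the payload
// and OR the tag into the upper half, mirroring zeroExtend32ToPtr followed by
// or64(TrustedImm64(tag << 32), ...).
inline uint64_t boxInt32(int32_t i, uint64_t tag = kIntegerTag)
{
    return uint64_t(uint32_t(i)) | (tag << 32);
}

// The 4-byte-register specialization instead writes the value as two 32-bit
// words: the payload first, then a 32-bit tag word at offset +4 (see the
// store32 / targetAddr.offset += 4 pairs above).
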
diff --git a/src/qml/jit/qv4binop.cpp b/src/qml/jit/qv4binop.cpp
index d2758c4a47..8468bf65a6 100644
--- a/src/qml/jit/qv4binop.cpp
+++ b/src/qml/jit/qv4binop.cpp
@@ -57,7 +57,8 @@ using namespace JIT;
#define NULL_OP \
{ 0, 0, 0, 0, 0, false }
-const Binop::OpInfo Binop::operations[IR::LastAluOp + 1] = {
+template <typename JITAssembler>
+const typename Binop<JITAssembler>::OpInfo Binop<JITAssembler>::operations[IR::LastAluOp + 1] = {
NULL_OP, // OpInvalid
NULL_OP, // OpIfTrue
NULL_OP, // OpNot
@@ -67,20 +68,20 @@ const Binop::OpInfo Binop::operations[IR::LastAluOp + 1] = {
NULL_OP, // OpIncrement
NULL_OP, // OpDecrement
- INLINE_OP(bitAnd, &Binop::inline_and32, &Binop::inline_and32), // OpBitAnd
- INLINE_OP(bitOr, &Binop::inline_or32, &Binop::inline_or32), // OpBitOr
- INLINE_OP(bitXor, &Binop::inline_xor32, &Binop::inline_xor32), // OpBitXor
+ INLINE_OP(bitAnd, &Binop<JITAssembler>::inline_and32, &Binop<JITAssembler>::inline_and32), // OpBitAnd
+ INLINE_OP(bitOr, &Binop<JITAssembler>::inline_or32, &Binop<JITAssembler>::inline_or32), // OpBitOr
+ INLINE_OP(bitXor, &Binop<JITAssembler>::inline_xor32, &Binop<JITAssembler>::inline_xor32), // OpBitXor
- INLINE_OPCONTEXT(add, &Binop::inline_add32, &Binop::inline_add32), // OpAdd
- INLINE_OP(sub, &Binop::inline_sub32, &Binop::inline_sub32), // OpSub
- INLINE_OP(mul, &Binop::inline_mul32, &Binop::inline_mul32), // OpMul
+ INLINE_OPCONTEXT(add, &Binop<JITAssembler>::inline_add32, &Binop<JITAssembler>::inline_add32), // OpAdd
+ INLINE_OP(sub, &Binop<JITAssembler>::inline_sub32, &Binop<JITAssembler>::inline_sub32), // OpSub
+ INLINE_OP(mul, &Binop<JITAssembler>::inline_mul32, &Binop<JITAssembler>::inline_mul32), // OpMul
OP(div), // OpDiv
OP(mod), // OpMod
- INLINE_OP(shl, &Binop::inline_shl32, &Binop::inline_shl32), // OpLShift
- INLINE_OP(shr, &Binop::inline_shr32, &Binop::inline_shr32), // OpRShift
- INLINE_OP(ushr, &Binop::inline_ushr32, &Binop::inline_ushr32), // OpURShift
+ INLINE_OP(shl, &Binop<JITAssembler>::inline_shl32, &Binop<JITAssembler>::inline_shl32), // OpLShift
+ INLINE_OP(shr, &Binop<JITAssembler>::inline_shr32, &Binop<JITAssembler>::inline_shr32), // OpRShift
+ INLINE_OP(ushr, &Binop<JITAssembler>::inline_ushr32, &Binop<JITAssembler>::inline_ushr32), // OpURShift
OP(greaterThan), // OpGt
OP(lessThan), // OpLt
@@ -100,7 +101,8 @@ const Binop::OpInfo Binop::operations[IR::LastAluOp + 1] = {
-void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
+template <typename JITAssembler>
+void Binop<JITAssembler>::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
{
if (op != IR::OpMod
&& lhs->type == IR::DoubleType && rhs->type == IR::DoubleType) {
@@ -125,15 +127,15 @@ void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
info = stringAdd;
}
- RuntimeCall fallBack(info.fallbackImplementation);
- RuntimeCall context(info.contextImplementation);
+ typename JITAssembler::RuntimeCall fallBack(info.fallbackImplementation);
+ typename JITAssembler::RuntimeCall context(info.contextImplementation);
if (fallBack.isValid()) {
as->generateFunctionCallImp(info.needsExceptionCheck, target, info.name, fallBack,
PointerToValue(lhs),
PointerToValue(rhs));
} else if (context.isValid()) {
as->generateFunctionCallImp(info.needsExceptionCheck, target, info.name, context,
- Assembler::EngineRegister,
+ JITAssembler::EngineRegister,
PointerToValue(lhs),
PointerToValue(rhs));
} else {
@@ -145,14 +147,15 @@ void Binop::generate(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
}
-void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
+template <typename JITAssembler>
+void Binop<JITAssembler>::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
{
IR::Temp *targetTemp = target->asTemp();
FPRegisterID targetReg;
if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister)
targetReg = (FPRegisterID) targetTemp->index;
else
- targetReg = Assembler::FPGpr0;
+ targetReg = JITAssembler::FPGpr0;
switch (op) {
case IR::OpAdd:
@@ -162,7 +165,7 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
#if CPU(X86)
if (IR::Const *c = rhs->asConst()) { // Y = X + constant -> Y = X; Y += [constant-address]
as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
- Address addr = as->loadConstant(c, Assembler::ScratchRegister);
+ Address addr = as->loadConstant(c, JITAssembler::ScratchRegister);
as->addDouble(addr, targetReg);
break;
}
@@ -174,7 +177,7 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
}
}
#endif
- as->addDouble(as->toDoubleRegister(lhs, Assembler::FPGpr0), as->toDoubleRegister(rhs, Assembler::FPGpr1), targetReg);
+ as->addDouble(as->toDoubleRegister(lhs, JITAssembler::FPGpr0), as->toDoubleRegister(rhs, JITAssembler::FPGpr1), targetReg);
break;
case IR::OpMul:
@@ -184,7 +187,7 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
#if CPU(X86)
if (IR::Const *c = rhs->asConst()) { // Y = X * constant -> Y = X; Y *= [constant-address]
as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
- Address addr = as->loadConstant(c, Assembler::ScratchRegister);
+ Address addr = as->loadConstant(c, JITAssembler::ScratchRegister);
as->mulDouble(addr, targetReg);
break;
}
@@ -196,14 +199,14 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
}
}
#endif
- as->mulDouble(as->toDoubleRegister(lhs, Assembler::FPGpr0), as->toDoubleRegister(rhs, Assembler::FPGpr1), targetReg);
+ as->mulDouble(as->toDoubleRegister(lhs, JITAssembler::FPGpr0), as->toDoubleRegister(rhs, JITAssembler::FPGpr1), targetReg);
break;
case IR::OpSub:
#if CPU(X86)
if (IR::Const *c = rhs->asConst()) { // Y = X - constant -> Y = X; Y -= [constant-address]
as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
- Address addr = as->loadConstant(c, Assembler::ScratchRegister);
+ Address addr = as->loadConstant(c, JITAssembler::ScratchRegister);
as->subDouble(addr, targetReg);
break;
}
@@ -219,19 +222,19 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
&& targetTemp
&& targetTemp->kind == IR::Temp::PhysicalRegister
&& targetTemp->index == rhs->asTemp()->index) { // Y = X - Y -> Tmp = Y; Y = X - Tmp
- as->moveDouble(as->toDoubleRegister(rhs, Assembler::FPGpr1), Assembler::FPGpr1);
- as->subDouble(as->toDoubleRegister(lhs, Assembler::FPGpr0), Assembler::FPGpr1, targetReg);
+ as->moveDouble(as->toDoubleRegister(rhs, JITAssembler::FPGpr1), JITAssembler::FPGpr1);
+ as->subDouble(as->toDoubleRegister(lhs, JITAssembler::FPGpr0), JITAssembler::FPGpr1, targetReg);
break;
}
- as->subDouble(as->toDoubleRegister(lhs, Assembler::FPGpr0), as->toDoubleRegister(rhs, Assembler::FPGpr1), targetReg);
+ as->subDouble(as->toDoubleRegister(lhs, JITAssembler::FPGpr0), as->toDoubleRegister(rhs, JITAssembler::FPGpr1), targetReg);
break;
case IR::OpDiv:
#if CPU(X86)
if (IR::Const *c = rhs->asConst()) { // Y = X / constant -> Y = X; Y /= [constant-address]
as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg);
- Address addr = as->loadConstant(c, Assembler::ScratchRegister);
+ Address addr = as->loadConstant(c, JITAssembler::ScratchRegister);
as->divDouble(addr, targetReg);
break;
}
@@ -248,12 +251,12 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
&& targetTemp
&& targetTemp->kind == IR::Temp::PhysicalRegister
&& targetTemp->index == rhs->asTemp()->index) { // Y = X / Y -> Tmp = Y; Y = X / Tmp
- as->moveDouble(as->toDoubleRegister(rhs, Assembler::FPGpr1), Assembler::FPGpr1);
- as->divDouble(as->toDoubleRegister(lhs, Assembler::FPGpr0), Assembler::FPGpr1, targetReg);
+ as->moveDouble(as->toDoubleRegister(rhs, JITAssembler::FPGpr1), JITAssembler::FPGpr1);
+ as->divDouble(as->toDoubleRegister(lhs, JITAssembler::FPGpr0), JITAssembler::FPGpr1, targetReg);
break;
}
- as->divDouble(as->toDoubleRegister(lhs, Assembler::FPGpr0), as->toDoubleRegister(rhs, Assembler::FPGpr1), targetReg);
+ as->divDouble(as->toDoubleRegister(lhs, JITAssembler::FPGpr0), as->toDoubleRegister(rhs, JITAssembler::FPGpr1), targetReg);
break;
default: {
@@ -271,8 +274,8 @@ void Binop::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target)
as->storeDouble(targetReg, target);
}
-
-bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target)
+template <typename JITAssembler>
+bool Binop<JITAssembler>::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target)
{
Q_ASSERT(leftSource->type == IR::SInt32Type);
Q_ASSERT(rightSource->type == IR::SInt32Type);
@@ -305,7 +308,7 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
bool inplaceOpWithAddress = false;
IR::Temp *targetTemp = target->asTemp();
- RegisterID targetReg = Assembler::ReturnValueRegister;
+ RegisterID targetReg = JITAssembler::ReturnValueRegister;
if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister) {
IR::Temp *rhs = rightSource->asTemp();
if (!rhs || rhs->kind != IR::Temp::PhysicalRegister || rhs->index != targetTemp->index) {
@@ -369,12 +372,12 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
&& targetTemp->index == rightSource->asTemp()->index) {
// X = Y - X -> Tmp = X; X = Y; X -= Tmp
targetReg = (RegisterID) targetTemp->index;
- as->move(targetReg, Assembler::ScratchRegister);
+ as->move(targetReg, JITAssembler::ScratchRegister);
as->move(as->toInt32Register(leftSource, targetReg), targetReg);
- as->sub32(Assembler::ScratchRegister, targetReg);
+ as->sub32(JITAssembler::ScratchRegister, targetReg);
} else {
as->move(as->toInt32Register(leftSource, targetReg), targetReg);
- as->sub32(as->toInt32Register(rightSource, Assembler::ScratchRegister), targetReg);
+ as->sub32(as->toInt32Register(rightSource, JITAssembler::ScratchRegister), targetReg);
}
as->storeInt32(targetReg, target);
return true;
@@ -419,7 +422,7 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
return false;
}
} else if (inplaceOpWithAddress) { // All cases of X = X op [address-of-Y]
- Pointer rhsAddr = as->loadAddress(Assembler::ScratchRegister, rightSource);
+ Pointer rhsAddr = as->loadAddress(JITAssembler::ScratchRegister, rightSource);
switch (op) {
case IR::OpBitAnd: as->and32(rhsAddr, targetReg); break;
case IR::OpBitOr: as->or32 (rhsAddr, targetReg); break;
@@ -433,7 +436,7 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
return false;
}
} else { // All cases of Z = X op Y
- RegisterID r = as->toInt32Register(rightSource, Assembler::ScratchRegister);
+ RegisterID r = as->toInt32Register(rightSource, JITAssembler::ScratchRegister);
switch (op) {
case IR::OpBitAnd: as->and32(l, r, targetReg); break;
case IR::OpBitOr: as->or32 (l, r, targetReg); break;
@@ -452,18 +455,18 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
// Not all CPUs accept shifts over more than 31 bits, and some CPUs (like ARM) will do
// surprising stuff when shifting over 0 bits.
#define CHECK_RHS(op) { \
- as->and32(TrustedImm32(0x1f), r, Assembler::ScratchRegister); \
- Jump notZero = as->branch32(RelationalCondition::NotEqual, Assembler::ScratchRegister, TrustedImm32(0)); \
+ as->and32(TrustedImm32(0x1f), r, JITAssembler::ScratchRegister); \
+ Jump notZero = as->branch32(RelationalCondition::NotEqual, JITAssembler::ScratchRegister, TrustedImm32(0)); \
as->move(l, targetReg); \
Jump done = as->jump(); \
notZero.link(as); \
op; \
done.link(as); \
}
- case IR::OpLShift: CHECK_RHS(as->lshift32(l, Assembler::ScratchRegister, targetReg)); break;
- case IR::OpRShift: CHECK_RHS(as->rshift32(l, Assembler::ScratchRegister, targetReg)); break;
+ case IR::OpLShift: CHECK_RHS(as->lshift32(l, JITAssembler::ScratchRegister, targetReg)); break;
+ case IR::OpRShift: CHECK_RHS(as->rshift32(l, JITAssembler::ScratchRegister, targetReg)); break;
case IR::OpURShift:
- CHECK_RHS(as->urshift32(l, Assembler::ScratchRegister, targetReg));
+ CHECK_RHS(as->urshift32(l, JITAssembler::ScratchRegister, targetReg));
as->storeUInt32(targetReg, target);
// IMPORTANT: do NOT do a break here! The stored type of an urshift is different from the other binary operations!
return true;
@@ -481,17 +484,19 @@ bool Binop::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *ta
return true;
}
-static inline Assembler::FPRegisterID getFreeFPReg(IR::Expr *shouldNotOverlap, unsigned hint)
+template <typename JITAssembler>
+inline typename JITAssembler::FPRegisterID getFreeFPReg(IR::Expr *shouldNotOverlap, unsigned hint)
{
if (IR::Temp *t = shouldNotOverlap->asTemp())
if (t->type == IR::DoubleType)
if (t->kind == IR::Temp::PhysicalRegister)
if (t->index == hint)
- return Assembler::FPRegisterID(hint + 1);
- return Assembler::FPRegisterID(hint);
+ return typename JITAssembler::FPRegisterID(hint + 1);
+ return typename JITAssembler::FPRegisterID(hint);
}
-Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target)
+template <typename JITAssembler>
+typename JITAssembler::Jump Binop<JITAssembler>::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target)
{
Jump done;
@@ -505,8 +510,8 @@ Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSourc
// register.
switch (op) {
case IR::OpAdd: {
- FPRegisterID lReg = getFreeFPReg(rightSource, 2);
- FPRegisterID rReg = getFreeFPReg(leftSource, 4);
+ FPRegisterID lReg = getFreeFPReg<JITAssembler>(rightSource, 2);
+ FPRegisterID rReg = getFreeFPReg<JITAssembler>(leftSource, 4);
Jump leftIsNoDbl = as->genTryDoubleConversion(leftSource, lReg);
Jump rightIsNoDbl = as->genTryDoubleConversion(rightSource, rReg);
@@ -520,8 +525,8 @@ Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSourc
rightIsNoDbl.link(as);
} break;
case IR::OpMul: {
- FPRegisterID lReg = getFreeFPReg(rightSource, 2);
- FPRegisterID rReg = getFreeFPReg(leftSource, 4);
+ FPRegisterID lReg = getFreeFPReg<JITAssembler>(rightSource, 2);
+ FPRegisterID rReg = getFreeFPReg<JITAssembler>(leftSource, 4);
Jump leftIsNoDbl = as->genTryDoubleConversion(leftSource, lReg);
Jump rightIsNoDbl = as->genTryDoubleConversion(rightSource, rReg);
@@ -535,8 +540,8 @@ Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSourc
rightIsNoDbl.link(as);
} break;
case IR::OpSub: {
- FPRegisterID lReg = getFreeFPReg(rightSource, 2);
- FPRegisterID rReg = getFreeFPReg(leftSource, 4);
+ FPRegisterID lReg = getFreeFPReg<JITAssembler>(rightSource, 2);
+ FPRegisterID rReg = getFreeFPReg<JITAssembler>(leftSource, 4);
Jump leftIsNoDbl = as->genTryDoubleConversion(leftSource, lReg);
Jump rightIsNoDbl = as->genTryDoubleConversion(rightSource, rReg);
@@ -550,8 +555,8 @@ Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSourc
rightIsNoDbl.link(as);
} break;
case IR::OpDiv: {
- FPRegisterID lReg = getFreeFPReg(rightSource, 2);
- FPRegisterID rReg = getFreeFPReg(leftSource, 4);
+ FPRegisterID lReg = getFreeFPReg<JITAssembler>(rightSource, 2);
+ FPRegisterID rReg = getFreeFPReg<JITAssembler>(leftSource, 4);
Jump leftIsNoDbl = as->genTryDoubleConversion(leftSource, lReg);
Jump rightIsNoDbl = as->genTryDoubleConversion(rightSource, rReg);
@@ -571,4 +576,9 @@ Assembler::Jump Binop::genInlineBinop(IR::Expr *leftSource, IR::Expr *rightSourc
return done;
}
+template struct QV4::JIT::Binop<QV4::JIT::Assembler<DefaultAssemblerTargetConfiguration>>;
+#if defined(V4_BOOTSTRAP) && CPU(X86_64)
+template struct QV4::JIT::Binop<QV4::JIT::Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerARMv7, NoOperatingSystemSpecialization>>>;
+#endif
+
#endif
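
With Binop's member functions now defined here in qv4binop.cpp instead of the header, other translation units only see the template declaration, so the explicit instantiations directly above are what actually emit object code for the assembler configurations in use. A minimal sketch of that pattern follows; every name in it is hypothetical and unrelated to the real classes.

// widget_p.h (hypothetical): only the declaration is visible to clients.
template <typename Backend>
struct Widget {
    void run(); // defined out of line, in the .cpp file
};

// widget.cpp (hypothetical): out-of-line definition plus explicit instantiation.
template <typename Backend>
void Widget<Backend>::run()
{
    Backend::execute();
}

struct DefaultBackend { static void execute() {} };

// Without this explicit instantiation, a caller in another translation unit
// using Widget<DefaultBackend>::run() would compile but fail to link.
template struct Widget<DefaultBackend>;
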
diff --git a/src/qml/jit/qv4binop_p.h b/src/qml/jit/qv4binop_p.h
index 3742e99e5a..d2d9ba7753 100644
--- a/src/qml/jit/qv4binop_p.h
+++ b/src/qml/jit/qv4binop_p.h
@@ -61,21 +61,22 @@ QT_BEGIN_NAMESPACE
namespace QV4 {
namespace JIT {
+template <typename JITAssembler>
struct Binop {
- Binop(Assembler *assembler, IR::AluOp operation)
+ Binop(JITAssembler *assembler, IR::AluOp operation)
: as(assembler)
, op(operation)
{}
- using Jump = Assembler::Jump;
- using Address = Assembler::Address;
- using RegisterID = Assembler::RegisterID;
- using FPRegisterID = Assembler::FPRegisterID;
- using TrustedImm32 = Assembler::TrustedImm32;
- using ResultCondition = Assembler::ResultCondition;
- using RelationalCondition = Assembler::RelationalCondition;
- using Pointer = Assembler::Pointer;
- using PointerToValue = Assembler::PointerToValue;
+ using Jump = typename JITAssembler::Jump;
+ using Address = typename JITAssembler::Address;
+ using RegisterID = typename JITAssembler::RegisterID;
+ using FPRegisterID = typename JITAssembler::FPRegisterID;
+ using TrustedImm32 = typename JITAssembler::TrustedImm32;
+ using ResultCondition = typename JITAssembler::ResultCondition;
+ using RelationalCondition = typename JITAssembler::RelationalCondition;
+ using Pointer = typename JITAssembler::Pointer;
+ using PointerToValue = typename JITAssembler::PointerToValue;
void generate(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target);
void doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *target);
@@ -103,8 +104,8 @@ struct Binop {
#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
return as->branchAdd32(ResultCondition::Overflow, addr, reg);
#else
- as->load32(addr, Assembler::ScratchRegister);
- return as->branchAdd32(ResultCondition::Overflow, Assembler::ScratchRegister, reg);
+ as->load32(addr, JITAssembler::ScratchRegister);
+ return as->branchAdd32(ResultCondition::Overflow, JITAssembler::ScratchRegister, reg);
#endif
}
@@ -118,8 +119,8 @@ struct Binop {
#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
return as->branchSub32(ResultCondition::Overflow, addr, reg);
#else
- as->load32(addr, Assembler::ScratchRegister);
- return as->branchSub32(ResultCondition::Overflow, Assembler::ScratchRegister, reg);
+ as->load32(addr, JITAssembler::ScratchRegister);
+ return as->branchSub32(ResultCondition::Overflow, JITAssembler::ScratchRegister, reg);
#endif
}
@@ -131,10 +132,10 @@ struct Binop {
Jump inline_mul32(Address addr, RegisterID reg)
{
#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
- return as->branchMul32(Assembler::Overflow, addr, reg);
+ return as->branchMul32(JITAssembler::Overflow, addr, reg);
#else
- as->load32(addr, Assembler::ScratchRegister);
- return as->branchMul32(ResultCondition::Overflow, Assembler::ScratchRegister, reg);
+ as->load32(addr, JITAssembler::ScratchRegister);
+ return as->branchMul32(ResultCondition::Overflow, JITAssembler::ScratchRegister, reg);
#endif
}
@@ -145,9 +146,9 @@ struct Binop {
Jump inline_shl32(Address addr, RegisterID reg)
{
- as->load32(addr, Assembler::ScratchRegister);
- as->and32(TrustedImm32(0x1f), Assembler::ScratchRegister);
- as->lshift32(Assembler::ScratchRegister, reg);
+ as->load32(addr, JITAssembler::ScratchRegister);
+ as->and32(TrustedImm32(0x1f), JITAssembler::ScratchRegister);
+ as->lshift32(JITAssembler::ScratchRegister, reg);
return Jump();
}
@@ -160,9 +161,9 @@ struct Binop {
Jump inline_shr32(Address addr, RegisterID reg)
{
- as->load32(addr, Assembler::ScratchRegister);
- as->and32(TrustedImm32(0x1f), Assembler::ScratchRegister);
- as->rshift32(Assembler::ScratchRegister, reg);
+ as->load32(addr, JITAssembler::ScratchRegister);
+ as->and32(TrustedImm32(0x1f), JITAssembler::ScratchRegister);
+ as->rshift32(JITAssembler::ScratchRegister, reg);
return Jump();
}
@@ -175,9 +176,9 @@ struct Binop {
Jump inline_ushr32(Address addr, RegisterID reg)
{
- as->load32(addr, Assembler::ScratchRegister);
- as->and32(TrustedImm32(0x1f), Assembler::ScratchRegister);
- as->urshift32(Assembler::ScratchRegister, reg);
+ as->load32(addr, JITAssembler::ScratchRegister);
+ as->and32(TrustedImm32(0x1f), JITAssembler::ScratchRegister);
+ as->urshift32(JITAssembler::ScratchRegister, reg);
return as->branchTest32(ResultCondition::Signed, reg, reg);
}
@@ -193,8 +194,8 @@ struct Binop {
#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
as->and32(addr, reg);
#else
- as->load32(addr, Assembler::ScratchRegister);
- as->and32(Assembler::ScratchRegister, reg);
+ as->load32(addr, JITAssembler::ScratchRegister);
+ as->and32(JITAssembler::ScratchRegister, reg);
#endif
return Jump();
}
@@ -210,8 +211,8 @@ struct Binop {
#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
as->or32(addr, reg);
#else
- as->load32(addr, Assembler::ScratchRegister);
- as->or32(Assembler::ScratchRegister, reg);
+ as->load32(addr, JITAssembler::ScratchRegister);
+ as->or32(JITAssembler::ScratchRegister, reg);
#endif
return Jump();
}
@@ -227,8 +228,8 @@ struct Binop {
#if HAVE(ALU_OPS_WITH_MEM_OPERAND)
as->xor32(addr, reg);
#else
- as->load32(addr, Assembler::ScratchRegister);
- as->xor32(Assembler::ScratchRegister, reg);
+ as->load32(addr, JITAssembler::ScratchRegister);
+ as->xor32(JITAssembler::ScratchRegister, reg);
#endif
return Jump();
}
@@ -241,7 +242,7 @@ struct Binop {
- Assembler *as;
+ JITAssembler *as;
IR::AluOp op;
};
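
The inline shift helpers above (inline_shl32, inline_shr32, inline_ushr32) mask the count with 0x1f, and inline_ushr32 additionally reports, via branchTest32(ResultCondition::Signed, ...), when the unsigned result no longer fits a signed 32-bit integer. Both details follow ECMAScript shift semantics; the short C++ sketch below spells those semantics out, using illustrative helper names that are not part of the real sources.

#include <cstdint>

// ECMAScript shifts use only the low five bits of the count, which is why the
// JIT helpers AND the right-hand side with 0x1f before shifting.
inline int32_t jsShl(int32_t lhs, uint32_t rhs)
{
    // two's-complement wrap on the way back to int32, as in JS
    return int32_t(uint32_t(lhs) << (rhs & 0x1f));
}

inline int32_t jsShr(int32_t lhs, uint32_t rhs)
{
    return lhs >> (rhs & 0x1f); // arithmetic (sign-preserving) shift
}

// Unsigned right shift yields a UInt32. When the top bit ends up set, the
// result no longer fits an int32, which is exactly the case the Signed test
// after inline_ushr32 detects, so the value is then stored as a UInt32/double
// rather than as a tagged integer.
inline uint32_t jsUshr(int32_t lhs, uint32_t rhs)
{
    return uint32_t(lhs) >> (rhs & 0x1f);
}
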
diff --git a/src/qml/jit/qv4isel_masm.cpp b/src/qml/jit/qv4isel_masm.cpp
index 279ccabf81..b1134d2bec 100644
--- a/src/qml/jit/qv4isel_masm.cpp
+++ b/src/qml/jit/qv4isel_masm.cpp
@@ -39,11 +39,7 @@
#include "qv4isel_masm_p.h"
#include "qv4runtime_p.h"
-#include "qv4object_p.h"
-#include "qv4functionobject_p.h"
-#include "qv4regexpobject_p.h"
#include "qv4lookup_p.h"
-#include "qv4function_p.h"
#include "qv4ssa_p.h"
#include "qv4regalloc_p.h"
#include "qv4assembler_p.h"
@@ -68,7 +64,8 @@ using namespace QV4;
using namespace QV4::JIT;
-InstructionSelection::InstructionSelection(QQmlEnginePrivate *qmlEngine, QV4::ExecutableAllocator *execAllocator, IR::Module *module, Compiler::JSUnitGenerator *jsGenerator, EvalISelFactory *iselFactory)
+template <typename JITAssembler>
+InstructionSelection<JITAssembler>::InstructionSelection(QQmlEnginePrivate *qmlEngine, QV4::ExecutableAllocator *execAllocator, IR::Module *module, Compiler::JSUnitGenerator *jsGenerator, EvalISelFactory *iselFactory)
: EvalInstructionSelection(execAllocator, module, jsGenerator, iselFactory)
, _block(0)
, _as(0)
@@ -79,12 +76,14 @@ InstructionSelection::InstructionSelection(QQmlEnginePrivate *qmlEngine, QV4::Ex
module->unitFlags |= QV4::CompiledData::Unit::ContainsMachineCode;
}
-InstructionSelection::~InstructionSelection()
+template <typename JITAssembler>
+InstructionSelection<JITAssembler>::~InstructionSelection()
{
delete _as;
}
-void InstructionSelection::run(int functionIndex)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::run(int functionIndex)
{
IR::Function *function = irModule->functions[functionIndex];
qSwap(_function, function);
@@ -93,8 +92,8 @@ void InstructionSelection::run(int functionIndex)
opt.run(qmlEngine);
static const bool withRegisterAllocator = qEnvironmentVariableIsEmpty("QV4_NO_REGALLOC");
- if (Assembler::RegAllocIsSupported && opt.isInSSA() && withRegisterAllocator) {
- RegisterAllocator regalloc(Assembler::getRegisterInfo());
+ if (JITTargetPlatform::RegAllocIsSupported && opt.isInSSA() && withRegisterAllocator) {
+ RegisterAllocator regalloc(JITTargetPlatform::getRegisterInfo());
regalloc.run(_function, opt);
calculateRegistersToSave(regalloc.usedRegisters());
} else {
@@ -103,47 +102,47 @@ void InstructionSelection::run(int functionIndex)
opt.convertOutOfSSA();
ConvertTemps().toStackSlots(_function);
IR::Optimizer::showMeTheCode(_function, "After stack slot allocation");
- calculateRegistersToSave(Assembler::getRegisterInfo()); // FIXME: this saves all registers. We can probably do with a subset: those that are not used by the register allocator.
+ calculateRegistersToSave(JITTargetPlatform::getRegisterInfo()); // FIXME: this saves all registers. We can probably do with a subset: those that are not used by the register allocator.
}
BitVector removableJumps = opt.calculateOptionalJumps();
qSwap(_removableJumps, removableJumps);
- Assembler* oldAssembler = _as;
- _as = new Assembler(jsGenerator, _function, executableAllocator);
+ JITAssembler* oldAssembler = _as;
+ _as = new JITAssembler(jsGenerator, _function, executableAllocator);
_as->setStackLayout(6, // 6 == max argc for calls to built-ins with an argument array
regularRegistersToSave.size(),
fpRegistersToSave.size());
_as->enterStandardStackFrame(regularRegistersToSave, fpRegistersToSave);
#ifdef ARGUMENTS_IN_REGISTERS
- _as->move(_as->registerForArgument(0), Assembler::EngineRegister);
+ _as->move(_as->registerForArgument(0), JITTargetPlatform::EngineRegister);
#else
- _as->loadPtr(addressForArgument(0), Assembler::EngineRegister);
+ _as->loadPtr(addressForArgument(0), JITTargetPlatform::EngineRegister);
#endif
const int locals = _as->stackLayout().calculateJSStackFrameSize();
if (locals > 0) {
- _as->loadPtr(Address(Assembler::EngineRegister, qOffsetOf(ExecutionEngine, jsStackTop)), Assembler::LocalsRegister);
+ _as->loadPtr(Address(JITTargetPlatform::EngineRegister, qOffsetOf(ExecutionEngine, jsStackTop)), JITTargetPlatform::LocalsRegister);
#ifdef VALUE_FITS_IN_REGISTER
- _as->move(Assembler::TrustedImm64(0), Assembler::ReturnValueRegister);
- _as->move(Assembler::TrustedImm32(locals), Assembler::ScratchRegister);
- Assembler::Label loop = _as->label();
- _as->store64(Assembler::ReturnValueRegister, Assembler::Address(Assembler::LocalsRegister));
- _as->add64(Assembler::TrustedImm32(8), Assembler::LocalsRegister);
- Assembler::Jump jump = _as->branchSub32(Assembler::NonZero, Assembler::TrustedImm32(1), Assembler::ScratchRegister);
+ _as->move(TrustedImm64(0), JITTargetPlatform::ReturnValueRegister);
+ _as->move(TrustedImm32(locals), JITTargetPlatform::ScratchRegister);
+ Label loop = _as->label();
+ _as->store64(JITTargetPlatform::ReturnValueRegister, Address(JITTargetPlatform::LocalsRegister));
+ _as->add64(TrustedImm32(8), JITTargetPlatform::LocalsRegister);
+ Jump jump = _as->branchSub32(ResultCondition::NonZero, TrustedImm32(1), JITTargetPlatform::ScratchRegister);
jump.linkTo(loop, _as);
#else
- _as->move(Assembler::TrustedImm32(0), Assembler::ReturnValueRegister);
- _as->move(Assembler::TrustedImm32(locals), Assembler::ScratchRegister);
- Assembler::Label loop = _as->label();
- _as->store32(Assembler::ReturnValueRegister, Assembler::Address(Assembler::LocalsRegister));
- _as->add32(Assembler::TrustedImm32(4), Assembler::LocalsRegister);
- _as->store32(Assembler::ReturnValueRegister, Assembler::Address(Assembler::LocalsRegister));
- _as->add32(Assembler::TrustedImm32(4), Assembler::LocalsRegister);
- Assembler::Jump jump = _as->branchSub32(Assembler::NonZero, Assembler::TrustedImm32(1), Assembler::ScratchRegister);
+ _as->move(TrustedImm32(0), JITTargetPlatform::ReturnValueRegister);
+ _as->move(TrustedImm32(locals), JITTargetPlatform::ScratchRegister);
+ Label loop = _as->label();
+ _as->store32(JITTargetPlatform::ReturnValueRegister, Address(JITTargetPlatform::LocalsRegister));
+ _as->add32(TrustedImm32(4), JITTargetPlatform::LocalsRegister);
+ _as->store32(JITTargetPlatform::ReturnValueRegister, Address(JITTargetPlatform::LocalsRegister));
+ _as->add32(TrustedImm32(4), JITTargetPlatform::LocalsRegister);
+ Jump jump = _as->branchSub32(ResultCondition::NonZero, TrustedImm32(1), JITTargetPlatform::ScratchRegister);
jump.linkTo(loop, _as);
#endif
- _as->storePtr(Assembler::LocalsRegister, Address(Assembler::EngineRegister, qOffsetOf(ExecutionEngine, jsStackTop)));
+ _as->storePtr(JITTargetPlatform::LocalsRegister, Address(JITTargetPlatform::EngineRegister, qOffsetOf(ExecutionEngine, jsStackTop)));
}
@@ -158,9 +157,9 @@ void InstructionSelection::run(int functionIndex)
for (IR::Stmt *s : _block->statements()) {
if (s->location.isValid()) {
if (int(s->location.startLine) != lastLine) {
- _as->loadPtr(Address(Assembler::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), Assembler::ScratchRegister);
- Assembler::Address lineAddr(Assembler::ScratchRegister, qOffsetOf(QV4::ExecutionContext::Data, lineNumber));
- _as->store32(Assembler::TrustedImm32(s->location.startLine), lineAddr);
+ _as->loadPtr(Address(JITTargetPlatform::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), JITTargetPlatform::ScratchRegister);
+ Address lineAddr(JITTargetPlatform::ScratchRegister, qOffsetOf(QV4::ExecutionContext::Data, lineNumber));
+ _as->store32(TrustedImm32(s->location.startLine), lineAddr);
lastLine = s->location.startLine;
}
}
@@ -181,165 +180,187 @@ void InstructionSelection::run(int functionIndex)
qSwap(_removableJumps, removableJumps);
}
-QQmlRefPointer<QV4::CompiledData::CompilationUnit> InstructionSelection::backendCompileStep()
+template <typename JITAssembler>
+QQmlRefPointer<QV4::CompiledData::CompilationUnit> InstructionSelection<JITAssembler>::backendCompileStep()
{
QQmlRefPointer<QV4::CompiledData::CompilationUnit> result;
result.adopt(compilationUnit.take());
return result;
}
-void InstructionSelection::callBuiltinInvalid(IR::Name *func, IR::ExprList *args, IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinInvalid(IR::Name *func, IR::ExprList *args, IR::Expr *result)
{
prepareCallData(args, 0);
if (useFastLookups && func->global) {
uint index = registerGlobalGetterLookup(*func->id);
- generateRuntimeCall(result, callGlobalLookup,
- Assembler::EngineRegister,
- Assembler::TrustedImm32(index),
+ generateRuntimeCall(_as, result, callGlobalLookup,
+ JITTargetPlatform::EngineRegister,
+ TrustedImm32(index),
baseAddressForCallData());
} else {
- generateRuntimeCall(result, callActivationProperty,
- Assembler::EngineRegister,
- Assembler::StringToIndex(*func->id),
+ generateRuntimeCall(_as, result, callActivationProperty,
+ JITTargetPlatform::EngineRegister,
+ StringToIndex(*func->id),
baseAddressForCallData());
}
}
-void InstructionSelection::callBuiltinTypeofQmlContextProperty(IR::Expr *base,
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinTypeofQmlContextProperty(IR::Expr *base,
IR::Member::MemberKind kind,
int propertyIndex, IR::Expr *result)
{
if (kind == IR::Member::MemberOfQmlScopeObject) {
- generateRuntimeCall(result, typeofScopeObjectProperty, Assembler::EngineRegister,
- Assembler::PointerToValue(base),
- Assembler::TrustedImm32(propertyIndex));
+ generateRuntimeCall(_as, result, typeofScopeObjectProperty, JITTargetPlatform::EngineRegister,
+ PointerToValue(base),
+ TrustedImm32(propertyIndex));
} else if (kind == IR::Member::MemberOfQmlContextObject) {
- generateRuntimeCall(result, typeofContextObjectProperty,
- Assembler::EngineRegister, Assembler::PointerToValue(base),
- Assembler::TrustedImm32(propertyIndex));
+ generateRuntimeCall(_as, result, typeofContextObjectProperty,
+ JITTargetPlatform::EngineRegister, PointerToValue(base),
+ TrustedImm32(propertyIndex));
} else {
Q_UNREACHABLE();
}
}
-void InstructionSelection::callBuiltinTypeofMember(IR::Expr *base, const QString &name,
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinTypeofMember(IR::Expr *base, const QString &name,
IR::Expr *result)
{
- generateRuntimeCall(result, typeofMember, Assembler::EngineRegister,
- Assembler::PointerToValue(base), Assembler::StringToIndex(name));
+ generateRuntimeCall(_as, result, typeofMember, JITTargetPlatform::EngineRegister,
+ PointerToValue(base), StringToIndex(name));
}
-void InstructionSelection::callBuiltinTypeofSubscript(IR::Expr *base, IR::Expr *index,
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinTypeofSubscript(IR::Expr *base, IR::Expr *index,
IR::Expr *result)
{
- generateRuntimeCall(result, typeofElement,
- Assembler::EngineRegister,
- Assembler::PointerToValue(base), Assembler::PointerToValue(index));
+ generateRuntimeCall(_as, result, typeofElement,
+ JITTargetPlatform::EngineRegister,
+ PointerToValue(base), PointerToValue(index));
}
-void InstructionSelection::callBuiltinTypeofName(const QString &name, IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinTypeofName(const QString &name, IR::Expr *result)
{
- generateRuntimeCall(result, typeofName, Assembler::EngineRegister,
- Assembler::StringToIndex(name));
+ generateRuntimeCall(_as, result, typeofName, JITTargetPlatform::EngineRegister,
+ StringToIndex(name));
}
-void InstructionSelection::callBuiltinTypeofValue(IR::Expr *value, IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinTypeofValue(IR::Expr *value, IR::Expr *result)
{
- generateRuntimeCall(result, typeofValue, Assembler::EngineRegister,
- Assembler::PointerToValue(value));
+ generateRuntimeCall(_as, result, typeofValue, JITTargetPlatform::EngineRegister,
+ PointerToValue(value));
}
-void InstructionSelection::callBuiltinDeleteMember(IR::Expr *base, const QString &name, IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinDeleteMember(IR::Expr *base, const QString &name, IR::Expr *result)
{
- generateRuntimeCall(result, deleteMember, Assembler::EngineRegister,
- Assembler::Reference(base), Assembler::StringToIndex(name));
+ generateRuntimeCall(_as, result, deleteMember, JITTargetPlatform::EngineRegister,
+ Reference(base), StringToIndex(name));
}
-void InstructionSelection::callBuiltinDeleteSubscript(IR::Expr *base, IR::Expr *index,
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinDeleteSubscript(IR::Expr *base, IR::Expr *index,
IR::Expr *result)
{
- generateRuntimeCall(result, deleteElement, Assembler::EngineRegister,
- Assembler::Reference(base), Assembler::PointerToValue(index));
+ generateRuntimeCall(_as, result, deleteElement, JITTargetPlatform::EngineRegister,
+ Reference(base), PointerToValue(index));
}
-void InstructionSelection::callBuiltinDeleteName(const QString &name, IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinDeleteName(const QString &name, IR::Expr *result)
{
- generateRuntimeCall(result, deleteName, Assembler::EngineRegister,
- Assembler::StringToIndex(name));
+ generateRuntimeCall(_as, result, deleteName, JITTargetPlatform::EngineRegister,
+ StringToIndex(name));
}
-void InstructionSelection::callBuiltinDeleteValue(IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinDeleteValue(IR::Expr *result)
{
_as->storeValue(Primitive::fromBoolean(false), result);
}
-void InstructionSelection::callBuiltinThrow(IR::Expr *arg)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinThrow(IR::Expr *arg)
{
- generateRuntimeCall(Assembler::ReturnValueRegister, throwException, Assembler::EngineRegister,
- Assembler::PointerToValue(arg));
+ generateRuntimeCall(_as, JITTargetPlatform::ReturnValueRegister, throwException, JITTargetPlatform::EngineRegister,
+ PointerToValue(arg));
}
-void InstructionSelection::callBuiltinReThrow()
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinReThrow()
{
_as->jumpToExceptionHandler();
}
-void InstructionSelection::callBuiltinUnwindException(IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinUnwindException(IR::Expr *result)
{
- generateRuntimeCall(result, unwindException, Assembler::EngineRegister);
+ generateRuntimeCall(_as, result, unwindException, JITTargetPlatform::EngineRegister);
}
-void InstructionSelection::callBuiltinPushCatchScope(const QString &exceptionName)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinPushCatchScope(const QString &exceptionName)
{
- generateRuntimeCall(Assembler::Void, pushCatchScope, Assembler::EngineRegister, Assembler::StringToIndex(exceptionName));
+ generateRuntimeCall(_as, JITAssembler::Void, pushCatchScope, JITTargetPlatform::EngineRegister, StringToIndex(exceptionName));
}
-void InstructionSelection::callBuiltinForeachIteratorObject(IR::Expr *arg, IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinForeachIteratorObject(IR::Expr *arg, IR::Expr *result)
{
Q_ASSERT(arg);
Q_ASSERT(result);
- generateRuntimeCall(result, foreachIterator, Assembler::EngineRegister, Assembler::PointerToValue(arg));
+ generateRuntimeCall(_as, result, foreachIterator, JITTargetPlatform::EngineRegister, PointerToValue(arg));
}
-void InstructionSelection::callBuiltinForeachNextPropertyname(IR::Expr *arg, IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinForeachNextPropertyname(IR::Expr *arg, IR::Expr *result)
{
Q_ASSERT(arg);
Q_ASSERT(result);
- generateRuntimeCall(result, foreachNextPropertyName, Assembler::Reference(arg));
+ generateRuntimeCall(_as, result, foreachNextPropertyName, Reference(arg));
}
-void InstructionSelection::callBuiltinPushWithScope(IR::Expr *arg)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinPushWithScope(IR::Expr *arg)
{
Q_ASSERT(arg);
- generateRuntimeCall(Assembler::Void, pushWithScope, Assembler::Reference(arg), Assembler::EngineRegister);
+ generateRuntimeCall(_as, JITAssembler::Void, pushWithScope, Reference(arg), JITTargetPlatform::EngineRegister);
}
-void InstructionSelection::callBuiltinPopScope()
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinPopScope()
{
- generateRuntimeCall(Assembler::Void, popScope, Assembler::EngineRegister);
+ generateRuntimeCall(_as, JITAssembler::Void, popScope, JITTargetPlatform::EngineRegister);
}
-void InstructionSelection::callBuiltinDeclareVar(bool deletable, const QString &name)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinDeclareVar(bool deletable, const QString &name)
{
- generateRuntimeCall(Assembler::Void, declareVar, Assembler::EngineRegister,
- Assembler::TrustedImm32(deletable), Assembler::StringToIndex(name));
+ generateRuntimeCall(_as, JITAssembler::Void, declareVar, JITTargetPlatform::EngineRegister,
+ TrustedImm32(deletable), StringToIndex(name));
}
-void InstructionSelection::callBuiltinDefineArray(IR::Expr *result, IR::ExprList *args)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinDefineArray(IR::Expr *result, IR::ExprList *args)
{
Q_ASSERT(result);
int length = prepareVariableArguments(args);
- generateRuntimeCall(result, arrayLiteral, Assembler::EngineRegister,
- baseAddressForCallArguments(), Assembler::TrustedImm32(length));
+ generateRuntimeCall(_as, result, arrayLiteral, JITTargetPlatform::EngineRegister,
+ baseAddressForCallArguments(), TrustedImm32(length));
}
-void InstructionSelection::callBuiltinDefineObjectLiteral(IR::Expr *result, int keyValuePairCount, IR::ExprList *keyValuePairs, IR::ExprList *arrayEntries, bool needSparseArray)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinDefineObjectLiteral(IR::Expr *result, int keyValuePairCount, IR::ExprList *keyValuePairs, IR::ExprList *arrayEntries, bool needSparseArray)
{
Q_ASSERT(result);
@@ -415,81 +436,89 @@ void InstructionSelection::callBuiltinDefineObjectLiteral(IR::Expr *result, int
it = it->next;
}
- generateRuntimeCall(result, objectLiteral, Assembler::EngineRegister,
- baseAddressForCallArguments(), Assembler::TrustedImm32(classId),
- Assembler::TrustedImm32(arrayValueCount), Assembler::TrustedImm32(arrayGetterSetterCount | (needSparseArray << 30)));
+ generateRuntimeCall(_as, result, objectLiteral, JITTargetPlatform::EngineRegister,
+ baseAddressForCallArguments(), TrustedImm32(classId),
+ TrustedImm32(arrayValueCount), TrustedImm32(arrayGetterSetterCount | (needSparseArray << 30)));
}
-void InstructionSelection::callBuiltinSetupArgumentObject(IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinSetupArgumentObject(IR::Expr *result)
{
- generateRuntimeCall(result, setupArgumentsObject, Assembler::EngineRegister);
+ generateRuntimeCall(_as, result, setupArgumentsObject, JITTargetPlatform::EngineRegister);
}
-void InstructionSelection::callBuiltinConvertThisToObject()
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callBuiltinConvertThisToObject()
{
- generateRuntimeCall(Assembler::Void, convertThisToObject, Assembler::EngineRegister);
+ generateRuntimeCall(_as, JITAssembler::Void, convertThisToObject, JITTargetPlatform::EngineRegister);
}
-void InstructionSelection::callValue(IR::Expr *value, IR::ExprList *args, IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callValue(IR::Expr *value, IR::ExprList *args, IR::Expr *result)
{
Q_ASSERT(value);
prepareCallData(args, 0);
if (value->asConst())
- generateRuntimeCall(result, callValue, Assembler::EngineRegister,
- Assembler::PointerToValue(value),
+ generateRuntimeCall(_as, result, callValue, JITTargetPlatform::EngineRegister,
+ PointerToValue(value),
baseAddressForCallData());
else
- generateRuntimeCall(result, callValue, Assembler::EngineRegister,
- Assembler::Reference(value),
+ generateRuntimeCall(_as, result, callValue, JITTargetPlatform::EngineRegister,
+ Reference(value),
baseAddressForCallData());
}
-void InstructionSelection::loadThisObject(IR::Expr *temp)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::loadThisObject(IR::Expr *temp)
{
- _as->loadPtr(Address(Assembler::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), Assembler::ScratchRegister);
- _as->loadPtr(Address(Assembler::ScratchRegister, qOffsetOf(ExecutionContext::Data, callData)), Assembler::ScratchRegister);
+ _as->loadPtr(Address(JITTargetPlatform::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), JITTargetPlatform::ScratchRegister);
+ _as->loadPtr(Address(JITTargetPlatform::ScratchRegister, qOffsetOf(ExecutionContext::Data, callData)), JITTargetPlatform::ScratchRegister);
#if defined(VALUE_FITS_IN_REGISTER)
- _as->load64(Pointer(Assembler::ScratchRegister, qOffsetOf(CallData, thisObject)),
- Assembler::ReturnValueRegister);
+ _as->load64(Pointer(JITTargetPlatform::ScratchRegister, qOffsetOf(CallData, thisObject)),
+ JITTargetPlatform::ReturnValueRegister);
_as->storeReturnValue(temp);
#else
- _as->copyValue(temp, Pointer(Assembler::ScratchRegister, qOffsetOf(CallData, thisObject)));
+ _as->copyValue(temp, Pointer(JITTargetPlatform::ScratchRegister, qOffsetOf(CallData, thisObject)));
#endif
}
-void InstructionSelection::loadQmlContext(IR::Expr *temp)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::loadQmlContext(IR::Expr *temp)
{
- generateRuntimeCall(temp, getQmlContext, Assembler::EngineRegister);
+ generateRuntimeCall(_as, temp, getQmlContext, JITTargetPlatform::EngineRegister);
}
-void InstructionSelection::loadQmlImportedScripts(IR::Expr *temp)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::loadQmlImportedScripts(IR::Expr *temp)
{
- generateRuntimeCall(temp, getQmlImportedScripts, Assembler::EngineRegister);
+ generateRuntimeCall(_as, temp, getQmlImportedScripts, JITTargetPlatform::EngineRegister);
}
-void InstructionSelection::loadQmlSingleton(const QString &name, IR::Expr *temp)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::loadQmlSingleton(const QString &name, IR::Expr *temp)
{
- generateRuntimeCall(temp, getQmlSingleton, Assembler::EngineRegister, Assembler::StringToIndex(name));
+ generateRuntimeCall(_as, temp, getQmlSingleton, JITTargetPlatform::EngineRegister, StringToIndex(name));
}
-void InstructionSelection::loadConst(IR::Const *sourceConst, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::loadConst(IR::Const *sourceConst, IR::Expr *target)
{
if (IR::Temp *targetTemp = target->asTemp()) {
if (targetTemp->kind == IR::Temp::PhysicalRegister) {
if (targetTemp->type == IR::DoubleType) {
Q_ASSERT(sourceConst->type == IR::DoubleType);
- _as->toDoubleRegister(sourceConst, (Assembler::FPRegisterID) targetTemp->index);
+ _as->toDoubleRegister(sourceConst, (FPRegisterID) targetTemp->index);
} else if (targetTemp->type == IR::SInt32Type) {
Q_ASSERT(sourceConst->type == IR::SInt32Type);
- _as->toInt32Register(sourceConst, (Assembler::RegisterID) targetTemp->index);
+ _as->toInt32Register(sourceConst, (RegisterID) targetTemp->index);
} else if (targetTemp->type == IR::UInt32Type) {
Q_ASSERT(sourceConst->type == IR::UInt32Type);
- _as->toUInt32Register(sourceConst, (Assembler::RegisterID) targetTemp->index);
+ _as->toUInt32Register(sourceConst, (RegisterID) targetTemp->index);
} else if (targetTemp->type == IR::BoolType) {
Q_ASSERT(sourceConst->type == IR::BoolType);
- _as->move(Assembler::TrustedImm32(convertToValue(sourceConst).int_32()),
- (Assembler::RegisterID) targetTemp->index);
+ _as->move(TrustedImm32(convertToValue(sourceConst).int_32()),
+ (RegisterID) targetTemp->index);
} else {
Q_UNREACHABLE();
}
@@ -500,147 +529,155 @@ void InstructionSelection::loadConst(IR::Const *sourceConst, IR::Expr *target)
_as->storeValue(convertToValue(sourceConst), target);
}
-void InstructionSelection::loadString(const QString &str, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::loadString(const QString &str, IR::Expr *target)
{
- Pointer srcAddr = _as->loadStringAddress(Assembler::ReturnValueRegister, str);
- _as->loadPtr(srcAddr, Assembler::ReturnValueRegister);
- Pointer destAddr = _as->loadAddress(Assembler::ScratchRegister, target);
-#ifdef QV4_USE_64_BIT_VALUE_ENCODING
- _as->store64(Assembler::ReturnValueRegister, destAddr);
-#else
- _as->store32(Assembler::ReturnValueRegister, destAddr);
- destAddr.offset += 4;
- _as->store32(Assembler::TrustedImm32(QV4::Value::Managed_Type_Internal), destAddr);
-#endif
+ Pointer srcAddr = _as->loadStringAddress(JITTargetPlatform::ReturnValueRegister, str);
+ _as->loadPtr(srcAddr, JITTargetPlatform::ReturnValueRegister);
+ Pointer destAddr = _as->loadAddress(JITTargetPlatform::ScratchRegister, target);
+ JITAssembler::RegisterSizeDependentOps::loadManagedPointer(_as, JITTargetPlatform::ReturnValueRegister, destAddr);
}
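
loadString now delegates the 64-bit vs. 32-bit store of the managed pointer to JITAssembler::RegisterSizeDependentOps::loadManagedPointer instead of an #ifdef. The following is only a rough sketch of that kind of size-dependent trait, using simplified, hypothetical types (a raw buffer instead of a JIT address, a placeholder tag constant), not the real Qt implementation:

#include <cstddef>
#include <cstdint>
#include <cstring>

struct DestSketch { unsigned char *p; };  // stand-in for the assembler's Pointer/Address

// Selected on pointer size, in the spirit of RegisterSizeDependentOps.
template <std::size_t PointerSize> struct RegisterSizeDependentOpsSketch;

template <> struct RegisterSizeDependentOpsSketch<8> {
    // 64-bit targets: the managed pointer is the whole encoded value, one store suffices.
    static void loadManagedPointer(std::uint64_t ptr, DestSketch d) {
        std::memcpy(d.p, &ptr, 8);
    }
};

template <> struct RegisterSizeDependentOpsSketch<4> {
    // 32-bit targets: store the pointer payload, then the Managed type tag word.
    static void loadManagedPointer(std::uint32_t ptr, DestSketch d) {
        const std::uint32_t managedTag = 0;  // placeholder for Value::Managed_Type_Internal
        std::memcpy(d.p, &ptr, 4);
        std::memcpy(d.p + 4, &managedTag, 4);
    }
};

int main() {
    unsigned char value[8] = {};
    RegisterSizeDependentOpsSketch<sizeof(void *)>::loadManagedPointer(0x1234, DestSketch{ value });
    return 0;
}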
-void InstructionSelection::loadRegexp(IR::RegExp *sourceRegexp, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::loadRegexp(IR::RegExp *sourceRegexp, IR::Expr *target)
{
int id = registerRegExp(sourceRegexp);
- generateRuntimeCall(target, regexpLiteral, Assembler::EngineRegister, Assembler::TrustedImm32(id));
+ generateRuntimeCall(_as, target, regexpLiteral, JITTargetPlatform::EngineRegister, TrustedImm32(id));
}
-void InstructionSelection::getActivationProperty(const IR::Name *name, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::getActivationProperty(const IR::Name *name, IR::Expr *target)
{
if (useFastLookups && name->global) {
uint index = registerGlobalGetterLookup(*name->id);
- generateLookupCall(target, index, qOffsetOf(QV4::Lookup, globalGetter), Assembler::EngineRegister, Assembler::Void);
+ generateLookupCall(target, index, qOffsetOf(QV4::Lookup, globalGetter), JITTargetPlatform::EngineRegister, JITAssembler::Void);
return;
}
- generateRuntimeCall(target, getActivationProperty, Assembler::EngineRegister, Assembler::StringToIndex(*name->id));
+ generateRuntimeCall(_as, target, getActivationProperty, JITTargetPlatform::EngineRegister, StringToIndex(*name->id));
}
-void InstructionSelection::setActivationProperty(IR::Expr *source, const QString &targetName)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::setActivationProperty(IR::Expr *source, const QString &targetName)
{
// ### should use a lookup call here
- generateRuntimeCall(Assembler::Void, setActivationProperty,
- Assembler::EngineRegister, Assembler::StringToIndex(targetName), Assembler::PointerToValue(source));
+ generateRuntimeCall(_as, JITAssembler::Void, setActivationProperty,
+ JITTargetPlatform::EngineRegister, StringToIndex(targetName), PointerToValue(source));
}
-void InstructionSelection::initClosure(IR::Closure *closure, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::initClosure(IR::Closure *closure, IR::Expr *target)
{
int id = closure->value;
- generateRuntimeCall(target, closure, Assembler::EngineRegister, Assembler::TrustedImm32(id));
+ generateRuntimeCall(_as, target, closure, JITTargetPlatform::EngineRegister, TrustedImm32(id));
}
-void InstructionSelection::getProperty(IR::Expr *base, const QString &name, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::getProperty(IR::Expr *base, const QString &name, IR::Expr *target)
{
if (useFastLookups) {
uint index = registerGetterLookup(name);
- generateLookupCall(target, index, qOffsetOf(QV4::Lookup, getter), Assembler::EngineRegister, Assembler::PointerToValue(base), Assembler::Void);
+ generateLookupCall(target, index, qOffsetOf(QV4::Lookup, getter), JITTargetPlatform::EngineRegister, PointerToValue(base), JITAssembler::Void);
} else {
- generateRuntimeCall(target, getProperty, Assembler::EngineRegister,
- Assembler::PointerToValue(base), Assembler::StringToIndex(name));
+ generateRuntimeCall(_as, target, getProperty, JITTargetPlatform::EngineRegister,
+ PointerToValue(base), StringToIndex(name));
}
}
-void InstructionSelection::getQmlContextProperty(IR::Expr *base, IR::Member::MemberKind kind, int index, bool captureRequired, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::getQmlContextProperty(IR::Expr *base, IR::Member::MemberKind kind, int index, bool captureRequired, IR::Expr *target)
{
if (kind == IR::Member::MemberOfQmlScopeObject)
- generateRuntimeCall(target, getQmlScopeObjectProperty, Assembler::EngineRegister, Assembler::PointerToValue(base), Assembler::TrustedImm32(index), Assembler::TrustedImm32(captureRequired));
+ generateRuntimeCall(_as, target, getQmlScopeObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(base), TrustedImm32(index), TrustedImm32(captureRequired));
else if (kind == IR::Member::MemberOfQmlContextObject)
- generateRuntimeCall(target, getQmlContextObjectProperty, Assembler::EngineRegister, Assembler::PointerToValue(base), Assembler::TrustedImm32(index), Assembler::TrustedImm32(captureRequired));
+ generateRuntimeCall(_as, target, getQmlContextObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(base), TrustedImm32(index), TrustedImm32(captureRequired));
else if (kind == IR::Member::MemberOfIdObjectsArray)
- generateRuntimeCall(target, getQmlIdObject, Assembler::EngineRegister, Assembler::PointerToValue(base), Assembler::TrustedImm32(index));
+ generateRuntimeCall(_as, target, getQmlIdObject, JITTargetPlatform::EngineRegister, PointerToValue(base), TrustedImm32(index));
else
Q_ASSERT(false);
}
-void InstructionSelection::getQObjectProperty(IR::Expr *base, int propertyIndex, bool captureRequired, bool isSingleton, int attachedPropertiesId, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::getQObjectProperty(IR::Expr *base, int propertyIndex, bool captureRequired, bool isSingleton, int attachedPropertiesId, IR::Expr *target)
{
if (attachedPropertiesId != 0)
- generateRuntimeCall(target, getQmlAttachedProperty, Assembler::EngineRegister, Assembler::TrustedImm32(attachedPropertiesId), Assembler::TrustedImm32(propertyIndex));
+ generateRuntimeCall(_as, target, getQmlAttachedProperty, JITTargetPlatform::EngineRegister, TrustedImm32(attachedPropertiesId), TrustedImm32(propertyIndex));
else if (isSingleton)
- generateRuntimeCall(target, getQmlSingletonQObjectProperty, Assembler::EngineRegister, Assembler::PointerToValue(base), Assembler::TrustedImm32(propertyIndex),
- Assembler::TrustedImm32(captureRequired));
+ generateRuntimeCall(_as, target, getQmlSingletonQObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(base), TrustedImm32(propertyIndex),
+ TrustedImm32(captureRequired));
else
- generateRuntimeCall(target, getQmlQObjectProperty, Assembler::EngineRegister, Assembler::PointerToValue(base), Assembler::TrustedImm32(propertyIndex),
- Assembler::TrustedImm32(captureRequired));
+ generateRuntimeCall(_as, target, getQmlQObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(base), TrustedImm32(propertyIndex),
+ TrustedImm32(captureRequired));
}
-void InstructionSelection::setProperty(IR::Expr *source, IR::Expr *targetBase,
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::setProperty(IR::Expr *source, IR::Expr *targetBase,
const QString &targetName)
{
if (useFastLookups) {
uint index = registerSetterLookup(targetName);
- generateLookupCall(Assembler::Void, index, qOffsetOf(QV4::Lookup, setter),
- Assembler::EngineRegister,
- Assembler::PointerToValue(targetBase),
- Assembler::PointerToValue(source));
+ generateLookupCall(JITAssembler::Void, index, qOffsetOf(QV4::Lookup, setter),
+ JITTargetPlatform::EngineRegister,
+ PointerToValue(targetBase),
+ PointerToValue(source));
} else {
- generateRuntimeCall(Assembler::Void, setProperty, Assembler::EngineRegister,
- Assembler::PointerToValue(targetBase), Assembler::StringToIndex(targetName),
- Assembler::PointerToValue(source));
+ generateRuntimeCall(_as, JITAssembler::Void, setProperty, JITTargetPlatform::EngineRegister,
+ PointerToValue(targetBase), StringToIndex(targetName),
+ PointerToValue(source));
}
}
-void InstructionSelection::setQmlContextProperty(IR::Expr *source, IR::Expr *targetBase, IR::Member::MemberKind kind, int propertyIndex)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::setQmlContextProperty(IR::Expr *source, IR::Expr *targetBase, IR::Member::MemberKind kind, int propertyIndex)
{
if (kind == IR::Member::MemberOfQmlScopeObject)
- generateRuntimeCall(Assembler::Void, setQmlScopeObjectProperty, Assembler::EngineRegister, Assembler::PointerToValue(targetBase),
- Assembler::TrustedImm32(propertyIndex), Assembler::PointerToValue(source));
+ generateRuntimeCall(_as, JITAssembler::Void, setQmlScopeObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(targetBase),
+ TrustedImm32(propertyIndex), PointerToValue(source));
else if (kind == IR::Member::MemberOfQmlContextObject)
- generateRuntimeCall(Assembler::Void, setQmlContextObjectProperty, Assembler::EngineRegister, Assembler::PointerToValue(targetBase),
- Assembler::TrustedImm32(propertyIndex), Assembler::PointerToValue(source));
+ generateRuntimeCall(_as, JITAssembler::Void, setQmlContextObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(targetBase),
+ TrustedImm32(propertyIndex), PointerToValue(source));
else
Q_ASSERT(false);
}
-void InstructionSelection::setQObjectProperty(IR::Expr *source, IR::Expr *targetBase, int propertyIndex)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::setQObjectProperty(IR::Expr *source, IR::Expr *targetBase, int propertyIndex)
{
- generateRuntimeCall(Assembler::Void, setQmlQObjectProperty, Assembler::EngineRegister, Assembler::PointerToValue(targetBase),
- Assembler::TrustedImm32(propertyIndex), Assembler::PointerToValue(source));
+ generateRuntimeCall(_as, JITAssembler::Void, setQmlQObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(targetBase),
+ TrustedImm32(propertyIndex), PointerToValue(source));
}
-void InstructionSelection::getElement(IR::Expr *base, IR::Expr *index, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::getElement(IR::Expr *base, IR::Expr *index, IR::Expr *target)
{
if (useFastLookups) {
uint lookup = registerIndexedGetterLookup();
generateLookupCall(target, lookup, qOffsetOf(QV4::Lookup, indexedGetter),
- Assembler::PointerToValue(base),
- Assembler::PointerToValue(index));
+ PointerToValue(base),
+ PointerToValue(index));
return;
}
- generateRuntimeCall(target, getElement, Assembler::EngineRegister,
- Assembler::PointerToValue(base), Assembler::PointerToValue(index));
+ generateRuntimeCall(_as, target, getElement, JITTargetPlatform::EngineRegister,
+ PointerToValue(base), PointerToValue(index));
}
-void InstructionSelection::setElement(IR::Expr *source, IR::Expr *targetBase, IR::Expr *targetIndex)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::setElement(IR::Expr *source, IR::Expr *targetBase, IR::Expr *targetIndex)
{
if (useFastLookups) {
uint lookup = registerIndexedSetterLookup();
- generateLookupCall(Assembler::Void, lookup, qOffsetOf(QV4::Lookup, indexedSetter),
- Assembler::PointerToValue(targetBase), Assembler::PointerToValue(targetIndex),
- Assembler::PointerToValue(source));
+ generateLookupCall(JITAssembler::Void, lookup, qOffsetOf(QV4::Lookup, indexedSetter),
+ PointerToValue(targetBase), PointerToValue(targetIndex),
+ PointerToValue(source));
return;
}
- generateRuntimeCall(Assembler::Void, setElement, Assembler::EngineRegister,
- Assembler::PointerToValue(targetBase), Assembler::PointerToValue(targetIndex),
- Assembler::PointerToValue(source));
+ generateRuntimeCall(_as, JITAssembler::Void, setElement, JITTargetPlatform::EngineRegister,
+ PointerToValue(targetBase), PointerToValue(targetIndex),
+ PointerToValue(source));
}
-void InstructionSelection::copyValue(IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::copyValue(IR::Expr *source, IR::Expr *target)
{
IR::Temp *sourceTemp = source->asTemp();
IR::Temp *targetTemp = target->asTemp();
@@ -655,25 +692,25 @@ void InstructionSelection::copyValue(IR::Expr *source, IR::Expr *target)
if (sourceTemp && sourceTemp->kind == IR::Temp::PhysicalRegister) {
if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister) {
if (sourceTemp->type == IR::DoubleType)
- _as->moveDouble((Assembler::FPRegisterID) sourceTemp->index,
- (Assembler::FPRegisterID) targetTemp->index);
+ _as->moveDouble((FPRegisterID) sourceTemp->index,
+ (FPRegisterID) targetTemp->index);
else
- _as->move((Assembler::RegisterID) sourceTemp->index,
- (Assembler::RegisterID) targetTemp->index);
+ _as->move((RegisterID) sourceTemp->index,
+ (RegisterID) targetTemp->index);
return;
} else {
switch (sourceTemp->type) {
case IR::DoubleType:
- _as->storeDouble((Assembler::FPRegisterID) sourceTemp->index, target);
+ _as->storeDouble((FPRegisterID) sourceTemp->index, target);
break;
case IR::SInt32Type:
- _as->storeInt32((Assembler::RegisterID) sourceTemp->index, target);
+ _as->storeInt32((RegisterID) sourceTemp->index, target);
break;
case IR::UInt32Type:
- _as->storeUInt32((Assembler::RegisterID) sourceTemp->index, target);
+ _as->storeUInt32((RegisterID) sourceTemp->index, target);
break;
case IR::BoolType:
- _as->storeBool((Assembler::RegisterID) sourceTemp->index, target);
+ _as->storeBool((RegisterID) sourceTemp->index, target);
break;
default:
Q_ASSERT(!"Unreachable");
@@ -685,19 +722,19 @@ void InstructionSelection::copyValue(IR::Expr *source, IR::Expr *target)
switch (targetTemp->type) {
case IR::DoubleType:
Q_ASSERT(source->type == IR::DoubleType);
- _as->toDoubleRegister(source, (Assembler::FPRegisterID) targetTemp->index);
+ _as->toDoubleRegister(source, (FPRegisterID) targetTemp->index);
return;
case IR::BoolType:
Q_ASSERT(source->type == IR::BoolType);
- _as->toInt32Register(source, (Assembler::RegisterID) targetTemp->index);
+ _as->toInt32Register(source, (RegisterID) targetTemp->index);
return;
case IR::SInt32Type:
Q_ASSERT(source->type == IR::SInt32Type);
- _as->toInt32Register(source, (Assembler::RegisterID) targetTemp->index);
+ _as->toInt32Register(source, (RegisterID) targetTemp->index);
return;
case IR::UInt32Type:
Q_ASSERT(source->type == IR::UInt32Type);
- _as->toUInt32Register(source, (Assembler::RegisterID) targetTemp->index);
+ _as->toUInt32Register(source, (RegisterID) targetTemp->index);
return;
default:
Q_ASSERT(!"Unreachable");
@@ -706,10 +743,11 @@ void InstructionSelection::copyValue(IR::Expr *source, IR::Expr *target)
}
// The target is not a physical register, nor is the source. So we can do a memory-to-memory copy:
- _as->memcopyValue(_as->loadAddress(Assembler::ReturnValueRegister, target), source, Assembler::ScratchRegister);
+ _as->memcopyValue(_as->loadAddress(JITTargetPlatform::ReturnValueRegister, target), source, JITTargetPlatform::ScratchRegister);
}
-void InstructionSelection::swapValues(IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::swapValues(IR::Expr *source, IR::Expr *target)
{
IR::Temp *sourceTemp = source->asTemp();
IR::Temp *targetTemp = target->asTemp();
@@ -719,26 +757,27 @@ void InstructionSelection::swapValues(IR::Expr *source, IR::Expr *target)
Q_ASSERT(sourceTemp->type == targetTemp->type);
if (sourceTemp->type == IR::DoubleType) {
- _as->moveDouble((Assembler::FPRegisterID) targetTemp->index, Assembler::FPGpr0);
- _as->moveDouble((Assembler::FPRegisterID) sourceTemp->index,
- (Assembler::FPRegisterID) targetTemp->index);
- _as->moveDouble(Assembler::FPGpr0, (Assembler::FPRegisterID) sourceTemp->index);
+ _as->moveDouble((FPRegisterID) targetTemp->index, JITTargetPlatform::FPGpr0);
+ _as->moveDouble((FPRegisterID) sourceTemp->index,
+ (FPRegisterID) targetTemp->index);
+ _as->moveDouble(JITTargetPlatform::FPGpr0, (FPRegisterID) sourceTemp->index);
} else {
- _as->swap((Assembler::RegisterID) sourceTemp->index,
- (Assembler::RegisterID) targetTemp->index);
+ _as->swap((RegisterID) sourceTemp->index,
+ (RegisterID) targetTemp->index);
}
return;
}
} else if (!sourceTemp || sourceTemp->kind == IR::Temp::StackSlot) {
if (!targetTemp || targetTemp->kind == IR::Temp::StackSlot) {
// Note: a swap for two stack-slots can involve different types.
- Assembler::Pointer sAddr = _as->loadAddress(Assembler::ScratchRegister, source);
- Assembler::Pointer tAddr = _as->loadAddress(Assembler::ReturnValueRegister, target);
+ Pointer sAddr = _as->loadAddress(JITTargetPlatform::ScratchRegister, source);
+ Pointer tAddr = _as->loadAddress(JITTargetPlatform::ReturnValueRegister, target);
// use the implementation in JSC::MacroAssembler, as it doesn't do bit swizzling
- _as->JSC::MacroAssembler::loadDouble(sAddr, Assembler::FPGpr0);
- _as->JSC::MacroAssembler::loadDouble(tAddr, Assembler::FPGpr1);
- _as->JSC::MacroAssembler::storeDouble(Assembler::FPGpr1, sAddr);
- _as->JSC::MacroAssembler::storeDouble(Assembler::FPGpr0, tAddr);
+ auto platformAs = static_cast<typename JITAssembler::MacroAssembler*>(_as);
+ platformAs->loadDouble(sAddr, JITTargetPlatform::FPGpr0);
+ platformAs->loadDouble(tAddr, JITTargetPlatform::FPGpr1);
+ platformAs->storeDouble(JITTargetPlatform::FPGpr1, sAddr);
+ platformAs->storeDouble(JITTargetPlatform::FPGpr0, tAddr);
return;
}
}
@@ -749,18 +788,18 @@ void InstructionSelection::swapValues(IR::Expr *source, IR::Expr *target)
Q_ASSERT(memExpr);
Q_ASSERT(regTemp);
- Assembler::Pointer addr = _as->loadAddress(Assembler::ReturnValueRegister, memExpr);
+ Pointer addr = _as->loadAddress(JITTargetPlatform::ReturnValueRegister, memExpr);
if (regTemp->type == IR::DoubleType) {
- _as->loadDouble(addr, Assembler::FPGpr0);
- _as->storeDouble((Assembler::FPRegisterID) regTemp->index, addr);
- _as->moveDouble(Assembler::FPGpr0, (Assembler::FPRegisterID) regTemp->index);
+ _as->loadDouble(addr, JITTargetPlatform::FPGpr0);
+ _as->storeDouble((FPRegisterID) regTemp->index, addr);
+ _as->moveDouble(JITTargetPlatform::FPGpr0, (FPRegisterID) regTemp->index);
} else if (regTemp->type == IR::UInt32Type) {
- _as->toUInt32Register(addr, Assembler::ScratchRegister);
- _as->storeUInt32((Assembler::RegisterID) regTemp->index, addr);
- _as->move(Assembler::ScratchRegister, (Assembler::RegisterID) regTemp->index);
+ _as->toUInt32Register(addr, JITTargetPlatform::ScratchRegister);
+ _as->storeUInt32((RegisterID) regTemp->index, addr);
+ _as->move(JITTargetPlatform::ScratchRegister, (RegisterID) regTemp->index);
} else {
- _as->load32(addr, Assembler::ScratchRegister);
- _as->store32((Assembler::RegisterID) regTemp->index, addr);
+ _as->load32(addr, JITTargetPlatform::ScratchRegister);
+ _as->store32((RegisterID) regTemp->index, addr);
if (regTemp->type != memExpr->type) {
addr.offset += 4;
quint32 tag;
@@ -775,55 +814,59 @@ void InstructionSelection::swapValues(IR::Expr *source, IR::Expr *target)
tag = 31337; // bogus value
Q_UNREACHABLE();
}
- _as->store32(Assembler::TrustedImm32(tag), addr);
+ _as->store32(TrustedImm32(tag), addr);
}
- _as->move(Assembler::ScratchRegister, (Assembler::RegisterID) regTemp->index);
+ _as->move(JITTargetPlatform::ScratchRegister, (RegisterID) regTemp->index);
}
}
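
In swapValues the qualified call _as->JSC::MacroAssembler::loadDouble(...) is replaced by casting _as to the JITAssembler's MacroAssembler base and calling through that pointer; either form bypasses the derived assembler's bit-swizzling override. A tiny sketch with hypothetical classes:

#include <iostream>

struct MacroAssemblerSketch {
    // Base version: plain load, no value-encoding bit swizzling.
    void loadDouble() { std::cout << "raw load\n"; }
};

struct AssemblerSketch : MacroAssemblerSketch {
    // Derived version: applies the bit swizzling.
    void loadDouble() { std::cout << "swizzled load\n"; }
};

int main() {
    AssemblerSketch as;
    as.loadDouble();                                        // derived (swizzling) version
    as.MacroAssemblerSketch::loadDouble();                  // old style: qualified call into the base
    static_cast<MacroAssemblerSketch *>(&as)->loadDouble(); // new style: call through the base pointer
    return 0;
}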
#define setOp(op, opName, operation) \
do { \
- op = RuntimeCall(qOffsetOf(QV4::Runtime, operation)); opName = "Runtime::" isel_stringIfy(operation); \
+ op = typename JITAssembler::RuntimeCall(qOffsetOf(QV4::Runtime, operation)); opName = "Runtime::" isel_stringIfy(operation); \
needsExceptionCheck = QV4::Runtime::Method_##operation##_NeedsExceptionCheck; \
} while (0)
#define setOpContext(op, opName, operation) \
do { \
- opContext = RuntimeCall(qOffsetOf(QV4::Runtime, operation)); opName = "Runtime::" isel_stringIfy(operation); \
+ opContext = typename JITAssembler::RuntimeCall(qOffsetOf(QV4::Runtime, operation)); opName = "Runtime::" isel_stringIfy(operation); \
needsExceptionCheck = QV4::Runtime::Method_##operation##_NeedsExceptionCheck; \
} while (0)
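
The switch from RuntimeCall(...) to typename JITAssembler::RuntimeCall(...) in these macros is forced by the dependent-name rules once the surrounding code becomes a template. A minimal standalone sketch of that rule, with hypothetical names rather than the real Qt types:

#include <cstddef>

// Hypothetical stand-ins for a concrete assembler and the templated isel class.
struct Assembler_X86_64 {
    struct RuntimeCall { std::size_t offset; };  // per-backend descriptor of a runtime function
};

template <typename JITAssembler>
struct InstructionSelectionSketch {
    void setOpSketch(std::size_t offset) {
        // RuntimeCall names a member of the template parameter, i.e. a dependent
        // type, so the typename keyword is required -- exactly as in the setOp /
        // setOpContext macros above.
        typename JITAssembler::RuntimeCall op{ offset };
        (void)op;
    }
};

int main() {
    InstructionSelectionSketch<Assembler_X86_64>().setOpSketch(0);
    return 0;
}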
-void InstructionSelection::unop(IR::AluOp oper, IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::unop(IR::AluOp oper, IR::Expr *source, IR::Expr *target)
{
- QV4::JIT::Unop unop(_as, oper);
+ QV4::JIT::Unop<JITAssembler> unop(_as, oper);
unop.generate(source, target);
}
-void InstructionSelection::binop(IR::AluOp oper, IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::binop(IR::AluOp oper, IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target)
{
- QV4::JIT::Binop binop(_as, oper);
+ QV4::JIT::Binop<JITAssembler> binop(_as, oper);
binop.generate(leftSource, rightSource, target);
}
-void InstructionSelection::callQmlContextProperty(IR::Expr *base, IR::Member::MemberKind kind, int propertyIndex, IR::ExprList *args, IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callQmlContextProperty(IR::Expr *base, IR::Member::MemberKind kind, int propertyIndex, IR::ExprList *args, IR::Expr *result)
{
prepareCallData(args, base);
if (kind == IR::Member::MemberOfQmlScopeObject)
- generateRuntimeCall(result, callQmlScopeObjectProperty,
- Assembler::EngineRegister,
- Assembler::TrustedImm32(propertyIndex),
+ generateRuntimeCall(_as, result, callQmlScopeObjectProperty,
+ JITTargetPlatform::EngineRegister,
+ TrustedImm32(propertyIndex),
baseAddressForCallData());
else if (kind == IR::Member::MemberOfQmlContextObject)
- generateRuntimeCall(result, callQmlContextObjectProperty,
- Assembler::EngineRegister,
- Assembler::TrustedImm32(propertyIndex),
+ generateRuntimeCall(_as, result, callQmlContextObjectProperty,
+ JITTargetPlatform::EngineRegister,
+ TrustedImm32(propertyIndex),
baseAddressForCallData());
else
Q_ASSERT(false);
}
-void InstructionSelection::callProperty(IR::Expr *base, const QString &name, IR::ExprList *args,
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callProperty(IR::Expr *base, const QString &name, IR::ExprList *args,
IR::Expr *result)
{
Q_ASSERT(base != 0);
@@ -832,29 +875,31 @@ void InstructionSelection::callProperty(IR::Expr *base, const QString &name, IR:
if (useFastLookups) {
uint index = registerGetterLookup(name);
- generateRuntimeCall(result, callPropertyLookup,
- Assembler::EngineRegister,
- Assembler::TrustedImm32(index),
+ generateRuntimeCall(_as, result, callPropertyLookup,
+ JITTargetPlatform::EngineRegister,
+ TrustedImm32(index),
baseAddressForCallData());
} else {
- generateRuntimeCall(result, callProperty, Assembler::EngineRegister,
- Assembler::StringToIndex(name),
+ generateRuntimeCall(_as, result, callProperty, JITTargetPlatform::EngineRegister,
+ StringToIndex(name),
baseAddressForCallData());
}
}
-void InstructionSelection::callSubscript(IR::Expr *base, IR::Expr *index, IR::ExprList *args,
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::callSubscript(IR::Expr *base, IR::Expr *index, IR::ExprList *args,
IR::Expr *result)
{
Q_ASSERT(base != 0);
prepareCallData(args, base);
- generateRuntimeCall(result, callElement, Assembler::EngineRegister,
- Assembler::PointerToValue(index),
+ generateRuntimeCall(_as, result, callElement, JITTargetPlatform::EngineRegister,
+ PointerToValue(index),
baseAddressForCallData());
}
-void InstructionSelection::convertType(IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::convertType(IR::Expr *source, IR::Expr *target)
{
switch (target->type) {
case IR::DoubleType:
@@ -875,7 +920,8 @@ void InstructionSelection::convertType(IR::Expr *source, IR::Expr *target)
}
}
-void InstructionSelection::convertTypeSlowPath(IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::convertTypeSlowPath(IR::Expr *source, IR::Expr *target)
{
Q_ASSERT(target->type != IR::BoolType);
@@ -885,7 +931,8 @@ void InstructionSelection::convertTypeSlowPath(IR::Expr *source, IR::Expr *targe
copyValue(source, target);
}
-void InstructionSelection::convertTypeToDouble(IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::convertTypeToDouble(IR::Expr *source, IR::Expr *target)
{
switch (source->type) {
case IR::SInt32Type:
@@ -897,51 +944,37 @@ void InstructionSelection::convertTypeToDouble(IR::Expr *source, IR::Expr *targe
convertUIntToDouble(source, target);
break;
case IR::UndefinedType:
- _as->loadDouble(_as->loadAddress(Assembler::ScratchRegister, source), Assembler::FPGpr0);
- _as->storeDouble(Assembler::FPGpr0, target);
+ _as->loadDouble(_as->loadAddress(JITTargetPlatform::ScratchRegister, source), JITTargetPlatform::FPGpr0);
+ _as->storeDouble(JITTargetPlatform::FPGpr0, target);
break;
case IR::StringType:
case IR::VarType: {
// load the tag:
- Assembler::Pointer tagAddr = _as->loadAddress(Assembler::ScratchRegister, source);
+ Pointer tagAddr = _as->loadAddress(JITTargetPlatform::ScratchRegister, source);
tagAddr.offset += 4;
- _as->load32(tagAddr, Assembler::ScratchRegister);
+ _as->load32(tagAddr, JITTargetPlatform::ScratchRegister);
// check if it's an int32:
- Assembler::Jump isNoInt = _as->branch32(Assembler::NotEqual, Assembler::ScratchRegister,
- Assembler::TrustedImm32(Value::Integer_Type_Internal));
+ Jump isNoInt = _as->branch32(RelationalCondition::NotEqual, JITTargetPlatform::ScratchRegister,
+ TrustedImm32(Value::Integer_Type_Internal));
convertIntToDouble(source, target);
- Assembler::Jump intDone = _as->jump();
+ Jump intDone = _as->jump();
// not an int, check if it's NOT a double:
isNoInt.link(_as);
-#ifdef QV4_USE_64_BIT_VALUE_ENCODING
- _as->rshift32(Assembler::TrustedImm32(Value::IsDoubleTag_Shift), Assembler::ScratchRegister);
- Assembler::Jump isDbl = _as->branch32(Assembler::NotEqual, Assembler::ScratchRegister,
- Assembler::TrustedImm32(0));
-#else
- _as->and32(Assembler::TrustedImm32(Value::NotDouble_Mask), Assembler::ScratchRegister);
- Assembler::Jump isDbl = _as->branch32(Assembler::NotEqual, Assembler::ScratchRegister,
- Assembler::TrustedImm32(Value::NotDouble_Mask));
-#endif
+ Jump isDbl = _as->generateIsDoubleCheck(JITTargetPlatform::ScratchRegister);
- generateRuntimeCall(target, toDouble, Assembler::PointerToValue(source));
- Assembler::Jump noDoubleDone = _as->jump();
+ generateRuntimeCall(_as, target, toDouble, PointerToValue(source));
+ Jump noDoubleDone = _as->jump();
// it is a double:
isDbl.link(_as);
- Assembler::Pointer addr2 = _as->loadAddress(Assembler::ScratchRegister, source);
+ Pointer addr2 = _as->loadAddress(JITTargetPlatform::ScratchRegister, source);
IR::Temp *targetTemp = target->asTemp();
if (!targetTemp || targetTemp->kind == IR::Temp::StackSlot) {
-#if Q_PROCESSOR_WORDSIZE == 8
- _as->load64(addr2, Assembler::ScratchRegister);
- _as->store64(Assembler::ScratchRegister, _as->loadAddress(Assembler::ReturnValueRegister, target));
-#else
- _as->loadDouble(addr2, Assembler::FPGpr0);
- _as->storeDouble(Assembler::FPGpr0, _as->loadAddress(Assembler::ReturnValueRegister, target));
-#endif
+ _as->memcopyValue(target, addr2, JITTargetPlatform::FPGpr0, JITTargetPlatform::ReturnValueRegister);
} else {
- _as->loadDouble(addr2, (Assembler::FPRegisterID) targetTemp->index);
+ _as->loadDouble(addr2, (FPRegisterID) targetTemp->index);
}
noDoubleDone.link(_as);
@@ -953,7 +986,8 @@ void InstructionSelection::convertTypeToDouble(IR::Expr *source, IR::Expr *targe
}
}
-void InstructionSelection::convertTypeToBool(IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::convertTypeToBool(IR::Expr *source, IR::Expr *target)
{
IR::Temp *sourceTemp = source->asTemp();
switch (source->type) {
@@ -965,16 +999,16 @@ void InstructionSelection::convertTypeToBool(IR::Expr *source, IR::Expr *target)
// The source is in a register if the register allocator is used. If the register
        // allocator was not used, then that means that we can use any register to
// load the double into.
- Assembler::FPRegisterID reg;
+ FPRegisterID reg;
if (sourceTemp && sourceTemp->kind == IR::Temp::PhysicalRegister)
- reg = (Assembler::FPRegisterID) sourceTemp->index;
+ reg = (FPRegisterID) sourceTemp->index;
else
- reg = _as->toDoubleRegister(source, (Assembler::FPRegisterID) 1);
- Assembler::Jump nonZero = _as->branchDoubleNonZero(reg, Assembler::FPGpr0);
+ reg = _as->toDoubleRegister(source, (FPRegisterID) 1);
+ Jump nonZero = _as->branchDoubleNonZero(reg, JITTargetPlatform::FPGpr0);
// it's 0, so false:
_as->storeBool(false, target);
- Assembler::Jump done = _as->jump();
+ Jump done = _as->jump();
// it's non-zero, so true:
nonZero.link(_as);
@@ -988,283 +1022,220 @@ void InstructionSelection::convertTypeToBool(IR::Expr *source, IR::Expr *target)
_as->storeBool(false, target);
break;
case IR::StringType:
- generateRuntimeCall(Assembler::ReturnValueRegister, toBoolean,
- Assembler::PointerToValue(source));
- _as->storeBool(Assembler::ReturnValueRegister, target);
+ generateRuntimeCall(_as, JITTargetPlatform::ReturnValueRegister, toBoolean,
+ PointerToValue(source));
+ _as->storeBool(JITTargetPlatform::ReturnValueRegister, target);
case IR::VarType:
default:
- Assembler::Pointer addr = _as->loadAddress(Assembler::ScratchRegister, source);
- Assembler::Pointer tagAddr = addr;
+ Pointer addr = _as->loadAddress(JITTargetPlatform::ScratchRegister, source);
+ Pointer tagAddr = addr;
tagAddr.offset += 4;
- _as->load32(tagAddr, Assembler::ReturnValueRegister);
+ _as->load32(tagAddr, JITTargetPlatform::ReturnValueRegister);
        // check if it's a bool:
- Assembler::Jump notBool = _as->branch32(Assembler::NotEqual, Assembler::ReturnValueRegister,
- Assembler::TrustedImm32(Value::Boolean_Type_Internal));
- _as->load32(addr, Assembler::ReturnValueRegister);
- Assembler::Jump boolDone = _as->jump();
+ Jump notBool = _as->branch32(RelationalCondition::NotEqual, JITTargetPlatform::ReturnValueRegister,
+ TrustedImm32(Value::Boolean_Type_Internal));
+ _as->load32(addr, JITTargetPlatform::ReturnValueRegister);
+ Jump boolDone = _as->jump();
// check if it's an int32:
notBool.link(_as);
- Assembler::Jump fallback = _as->branch32(Assembler::NotEqual, Assembler::ReturnValueRegister,
- Assembler::TrustedImm32(Value::Integer_Type_Internal));
- _as->load32(addr, Assembler::ReturnValueRegister);
- Assembler::Jump isZero = _as->branch32(Assembler::Equal, Assembler::ReturnValueRegister,
- Assembler::TrustedImm32(0));
- _as->move(Assembler::TrustedImm32(1), Assembler::ReturnValueRegister);
- Assembler::Jump intDone = _as->jump();
+ Jump fallback = _as->branch32(RelationalCondition::NotEqual, JITTargetPlatform::ReturnValueRegister,
+ TrustedImm32(Value::Integer_Type_Internal));
+ _as->load32(addr, JITTargetPlatform::ReturnValueRegister);
+ Jump isZero = _as->branch32(RelationalCondition::Equal, JITTargetPlatform::ReturnValueRegister,
+ TrustedImm32(0));
+ _as->move(TrustedImm32(1), JITTargetPlatform::ReturnValueRegister);
+ Jump intDone = _as->jump();
// not an int:
fallback.link(_as);
- generateRuntimeCall(Assembler::ReturnValueRegister, toBoolean,
- Assembler::PointerToValue(source));
+ generateRuntimeCall(_as, JITTargetPlatform::ReturnValueRegister, toBoolean,
+ PointerToValue(source));
isZero.link(_as);
intDone.link(_as);
boolDone.link(_as);
- _as->storeBool(Assembler::ReturnValueRegister, target);
+ _as->storeBool(JITTargetPlatform::ReturnValueRegister, target);
break;
}
}
-void InstructionSelection::convertTypeToSInt32(IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::convertTypeToSInt32(IR::Expr *source, IR::Expr *target)
{
switch (source->type) {
case IR::VarType: {
-
-#ifdef QV4_USE_64_BIT_VALUE_ENCODING
- Assembler::Pointer addr = _as->loadAddress(Assembler::ScratchRegister, source);
- _as->load64(addr, Assembler::ScratchRegister);
- _as->move(Assembler::ScratchRegister, Assembler::ReturnValueRegister);
-
- // check if it's integer convertible
- _as->urshift64(Assembler::TrustedImm32(QV4::Value::IsIntegerConvertible_Shift), Assembler::ScratchRegister);
- Assembler::Jump isIntConvertible = _as->branch32(Assembler::Equal, Assembler::ScratchRegister, Assembler::TrustedImm32(3));
-
- // nope, not integer convertible, so check for a double:
- _as->urshift64(Assembler::TrustedImm32(
- QV4::Value::IsDoubleTag_Shift - QV4::Value::IsIntegerConvertible_Shift),
- Assembler::ScratchRegister);
- Assembler::Jump fallback = _as->branch32(Assembler::GreaterThan, Assembler::ScratchRegister, Assembler::TrustedImm32(0));
-
- // it's a double
- _as->move(Assembler::TrustedImm64(QV4::Value::NaNEncodeMask), Assembler::ScratchRegister);
- _as->xor64(Assembler::ScratchRegister, Assembler::ReturnValueRegister);
- _as->move64ToDouble(Assembler::ReturnValueRegister, Assembler::FPGpr0);
- Assembler::Jump success =
- _as->branchTruncateDoubleToInt32(Assembler::FPGpr0, Assembler::ReturnValueRegister,
- Assembler::BranchIfTruncateSuccessful);
-
- // not an int:
- fallback.link(_as);
- generateRuntimeCall(Assembler::ReturnValueRegister, toInt,
- _as->loadAddress(Assembler::ScratchRegister, source));
-
- isIntConvertible.link(_as);
- success.link(_as);
- IR::Temp *targetTemp = target->asTemp();
- if (!targetTemp || targetTemp->kind == IR::Temp::StackSlot) {
- Assembler::Pointer targetAddr = _as->loadAddress(Assembler::ScratchRegister, target);
- _as->store32(Assembler::ReturnValueRegister, targetAddr);
- targetAddr.offset += 4;
- _as->store32(Assembler::TrustedImm32(Value::Integer_Type_Internal), targetAddr);
- } else {
- _as->storeInt32(Assembler::ReturnValueRegister, target);
- }
-#else
- // load the tag:
- Assembler::Pointer addr = _as->loadAddress(Assembler::ScratchRegister, source);
- Assembler::Pointer tagAddr = addr;
- tagAddr.offset += 4;
- _as->load32(tagAddr, Assembler::ReturnValueRegister);
-
- // check if it's an int32:
- Assembler::Jump fallback = _as->branch32(Assembler::NotEqual, Assembler::ReturnValueRegister,
- Assembler::TrustedImm32(Value::Integer_Type_Internal));
- IR::Temp *targetTemp = target->asTemp();
- if (!targetTemp || targetTemp->kind == IR::Temp::StackSlot) {
- _as->load32(addr, Assembler::ReturnValueRegister);
- Assembler::Pointer targetAddr = _as->loadAddress(Assembler::ScratchRegister, target);
- _as->store32(Assembler::ReturnValueRegister, targetAddr);
- targetAddr.offset += 4;
- _as->store32(Assembler::TrustedImm32(Value::Integer_Type_Internal), targetAddr);
- } else {
- _as->load32(addr, (Assembler::RegisterID) targetTemp->index);
- }
- Assembler::Jump intDone = _as->jump();
-
- // not an int:
- fallback.link(_as);
- generateRuntimeCall(Assembler::ReturnValueRegister, toInt,
- _as->loadAddress(Assembler::ScratchRegister, source));
- _as->storeInt32(Assembler::ReturnValueRegister, target);
-
- intDone.link(_as);
-#endif
-
+ JITAssembler::RegisterSizeDependentOps::convertVarToSInt32(_as, source, target);
} break;
case IR::DoubleType: {
- Assembler::Jump success =
+ Jump success =
_as->branchTruncateDoubleToInt32(_as->toDoubleRegister(source),
- Assembler::ReturnValueRegister,
- Assembler::BranchIfTruncateSuccessful);
- generateRuntimeCall(Assembler::ReturnValueRegister, doubleToInt,
- Assembler::PointerToValue(source));
+ JITTargetPlatform::ReturnValueRegister,
+ BranchTruncateType::BranchIfTruncateSuccessful);
+ generateRuntimeCall(_as, JITTargetPlatform::ReturnValueRegister, doubleToInt,
+ PointerToValue(source));
success.link(_as);
- _as->storeInt32(Assembler::ReturnValueRegister, target);
+ _as->storeInt32(JITTargetPlatform::ReturnValueRegister, target);
} break;
case IR::UInt32Type:
- _as->storeInt32(_as->toUInt32Register(source, Assembler::ReturnValueRegister), target);
+ _as->storeInt32(_as->toUInt32Register(source, JITTargetPlatform::ReturnValueRegister), target);
break;
case IR::NullType:
case IR::UndefinedType:
- _as->move(Assembler::TrustedImm32(0), Assembler::ReturnValueRegister);
- _as->storeInt32(Assembler::ReturnValueRegister, target);
+ _as->move(TrustedImm32(0), JITTargetPlatform::ReturnValueRegister);
+ _as->storeInt32(JITTargetPlatform::ReturnValueRegister, target);
break;
case IR::BoolType:
- _as->storeInt32(_as->toInt32Register(source, Assembler::ReturnValueRegister), target);
+ _as->storeInt32(_as->toInt32Register(source, JITTargetPlatform::ReturnValueRegister), target);
break;
case IR::StringType:
default:
- generateRuntimeCall(Assembler::ReturnValueRegister, toInt,
- _as->loadAddress(Assembler::ScratchRegister, source));
- _as->storeInt32(Assembler::ReturnValueRegister, target);
+ generateRuntimeCall(_as, JITTargetPlatform::ReturnValueRegister, toInt,
+ _as->loadAddress(JITTargetPlatform::ScratchRegister, source));
+ _as->storeInt32(JITTargetPlatform::ReturnValueRegister, target);
break;
} // switch (source->type)
}
-void InstructionSelection::convertTypeToUInt32(IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::convertTypeToUInt32(IR::Expr *source, IR::Expr *target)
{
switch (source->type) {
case IR::VarType: {
// load the tag:
- Assembler::Pointer tagAddr = _as->loadAddress(Assembler::ScratchRegister, source);
+ Pointer tagAddr = _as->loadAddress(JITTargetPlatform::ScratchRegister, source);
tagAddr.offset += 4;
- _as->load32(tagAddr, Assembler::ScratchRegister);
+ _as->load32(tagAddr, JITTargetPlatform::ScratchRegister);
// check if it's an int32:
- Assembler::Jump isNoInt = _as->branch32(Assembler::NotEqual, Assembler::ScratchRegister,
- Assembler::TrustedImm32(Value::Integer_Type_Internal));
- Assembler::Pointer addr = _as->loadAddress(Assembler::ScratchRegister, source);
- _as->storeUInt32(_as->toInt32Register(addr, Assembler::ScratchRegister), target);
- Assembler::Jump intDone = _as->jump();
+ Jump isNoInt = _as->branch32(RelationalCondition::NotEqual, JITTargetPlatform::ScratchRegister,
+ TrustedImm32(Value::Integer_Type_Internal));
+ Pointer addr = _as->loadAddress(JITTargetPlatform::ScratchRegister, source);
+ _as->storeUInt32(_as->toInt32Register(addr, JITTargetPlatform::ScratchRegister), target);
+ Jump intDone = _as->jump();
// not an int:
isNoInt.link(_as);
- generateRuntimeCall(Assembler::ReturnValueRegister, toUInt,
- _as->loadAddress(Assembler::ScratchRegister, source));
- _as->storeInt32(Assembler::ReturnValueRegister, target);
+ generateRuntimeCall(_as, JITTargetPlatform::ReturnValueRegister, toUInt,
+ _as->loadAddress(JITTargetPlatform::ScratchRegister, source));
+ _as->storeInt32(JITTargetPlatform::ReturnValueRegister, target);
intDone.link(_as);
} break;
case IR::DoubleType: {
- Assembler::FPRegisterID reg = _as->toDoubleRegister(source);
- Assembler::Jump success =
- _as->branchTruncateDoubleToUint32(reg, Assembler::ReturnValueRegister,
- Assembler::BranchIfTruncateSuccessful);
- generateRuntimeCall(Assembler::ReturnValueRegister, doubleToUInt,
- Assembler::PointerToValue(source));
+ FPRegisterID reg = _as->toDoubleRegister(source);
+ Jump success =
+ _as->branchTruncateDoubleToUint32(reg, JITTargetPlatform::ReturnValueRegister,
+ BranchTruncateType::BranchIfTruncateSuccessful);
+ generateRuntimeCall(_as, JITTargetPlatform::ReturnValueRegister, doubleToUInt,
+ PointerToValue(source));
success.link(_as);
- _as->storeUInt32(Assembler::ReturnValueRegister, target);
+ _as->storeUInt32(JITTargetPlatform::ReturnValueRegister, target);
} break;
case IR::NullType:
case IR::UndefinedType:
- _as->move(Assembler::TrustedImm32(0), Assembler::ReturnValueRegister);
- _as->storeUInt32(Assembler::ReturnValueRegister, target);
+ _as->move(TrustedImm32(0), JITTargetPlatform::ReturnValueRegister);
+ _as->storeUInt32(JITTargetPlatform::ReturnValueRegister, target);
break;
case IR::StringType:
- generateRuntimeCall(Assembler::ReturnValueRegister, toUInt,
- Assembler::PointerToValue(source));
- _as->storeUInt32(Assembler::ReturnValueRegister, target);
+ generateRuntimeCall(_as, JITTargetPlatform::ReturnValueRegister, toUInt,
+ PointerToValue(source));
+ _as->storeUInt32(JITTargetPlatform::ReturnValueRegister, target);
break;
case IR::SInt32Type:
case IR::BoolType:
- _as->storeUInt32(_as->toInt32Register(source, Assembler::ReturnValueRegister), target);
+ _as->storeUInt32(_as->toInt32Register(source, JITTargetPlatform::ReturnValueRegister), target);
break;
default:
break;
} // switch (source->type)
}
-void InstructionSelection::constructActivationProperty(IR::Name *func, IR::ExprList *args, IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::constructActivationProperty(IR::Name *func, IR::ExprList *args, IR::Expr *result)
{
Q_ASSERT(func != 0);
prepareCallData(args, 0);
if (useFastLookups && func->global) {
uint index = registerGlobalGetterLookup(*func->id);
- generateRuntimeCall(result, constructGlobalLookup,
- Assembler::EngineRegister,
- Assembler::TrustedImm32(index), baseAddressForCallData());
+ generateRuntimeCall(_as, result, constructGlobalLookup,
+ JITTargetPlatform::EngineRegister,
+ TrustedImm32(index), baseAddressForCallData());
return;
}
- generateRuntimeCall(result, constructActivationProperty,
- Assembler::EngineRegister,
- Assembler::StringToIndex(*func->id),
+ generateRuntimeCall(_as, result, constructActivationProperty,
+ JITTargetPlatform::EngineRegister,
+ StringToIndex(*func->id),
baseAddressForCallData());
}
-void InstructionSelection::constructProperty(IR::Expr *base, const QString &name, IR::ExprList *args, IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::constructProperty(IR::Expr *base, const QString &name, IR::ExprList *args, IR::Expr *result)
{
prepareCallData(args, base);
if (useFastLookups) {
uint index = registerGetterLookup(name);
- generateRuntimeCall(result, constructPropertyLookup,
- Assembler::EngineRegister,
- Assembler::TrustedImm32(index),
+ generateRuntimeCall(_as, result, constructPropertyLookup,
+ JITTargetPlatform::EngineRegister,
+ TrustedImm32(index),
baseAddressForCallData());
return;
}
- generateRuntimeCall(result, constructProperty, Assembler::EngineRegister,
- Assembler::StringToIndex(name),
+ generateRuntimeCall(_as, result, constructProperty, JITTargetPlatform::EngineRegister,
+ StringToIndex(name),
baseAddressForCallData());
}
-void InstructionSelection::constructValue(IR::Expr *value, IR::ExprList *args, IR::Expr *result)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::constructValue(IR::Expr *value, IR::ExprList *args, IR::Expr *result)
{
Q_ASSERT(value != 0);
prepareCallData(args, 0);
- generateRuntimeCall(result, constructValue,
- Assembler::EngineRegister,
- Assembler::Reference(value),
+ generateRuntimeCall(_as, result, constructValue,
+ JITTargetPlatform::EngineRegister,
+ Reference(value),
baseAddressForCallData());
}
-void InstructionSelection::visitJump(IR::Jump *s)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::visitJump(IR::Jump *s)
{
if (!_removableJumps.at(_block->index()))
_as->jumpToBlock(_block, s->target);
}
-void InstructionSelection::visitCJump(IR::CJump *s)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::visitCJump(IR::CJump *s)
{
IR::Temp *t = s->cond->asTemp();
if (t || s->cond->asArgLocal()) {
- Assembler::RegisterID reg;
+ RegisterID reg;
if (t && t->kind == IR::Temp::PhysicalRegister) {
Q_ASSERT(t->type == IR::BoolType);
- reg = (Assembler::RegisterID) t->index;
+ reg = (RegisterID) t->index;
} else if (t && t->kind == IR::Temp::StackSlot && t->type == IR::BoolType) {
- reg = Assembler::ReturnValueRegister;
+ reg = JITTargetPlatform::ReturnValueRegister;
_as->toInt32Register(t, reg);
} else {
- Address temp = _as->loadAddress(Assembler::ScratchRegister, s->cond);
+ Address temp = _as->loadAddress(JITTargetPlatform::ScratchRegister, s->cond);
Address tag = temp;
tag.offset += QV4::Value::tagOffset();
- Assembler::Jump booleanConversion = _as->branch32(Assembler::NotEqual, tag, Assembler::TrustedImm32(QV4::Value::Boolean_Type_Internal));
+ Jump booleanConversion = _as->branch32(RelationalCondition::NotEqual, tag, TrustedImm32(QV4::Value::Boolean_Type_Internal));
Address data = temp;
data.offset += QV4::Value::valueOffset();
- _as->load32(data, Assembler::ReturnValueRegister);
- Assembler::Jump testBoolean = _as->jump();
+ _as->load32(data, JITTargetPlatform::ReturnValueRegister);
+ Jump testBoolean = _as->jump();
booleanConversion.link(_as);
- reg = Assembler::ReturnValueRegister;
- generateRuntimeCall(reg, toBoolean, Assembler::Reference(s->cond));
+ reg = JITTargetPlatform::ReturnValueRegister;
+ generateRuntimeCall(_as, reg, toBoolean, Reference(s->cond));
testBoolean.link(_as);
}
@@ -1274,9 +1245,9 @@ void InstructionSelection::visitCJump(IR::CJump *s)
} else if (IR::Const *c = s->cond->asConst()) {
// TODO: SSA optimization for constant condition evaluation should remove this.
// See also visitCJump() in RegAllocInfo.
- generateRuntimeCall(Assembler::ReturnValueRegister, toBoolean,
- Assembler::PointerToValue(c));
- _as->generateCJumpOnNonZero(Assembler::ReturnValueRegister, _block, s->iftrue, s->iffalse);
+ generateRuntimeCall(_as, JITTargetPlatform::ReturnValueRegister, toBoolean,
+ PointerToValue(c));
+ _as->generateCJumpOnNonZero(JITTargetPlatform::ReturnValueRegister, _block, s->iftrue, s->iffalse);
return;
} else if (IR::Binop *b = s->cond->asBinop()) {
if (b->left->type == IR::DoubleType && b->right->type == IR::DoubleType
@@ -1296,8 +1267,8 @@ void InstructionSelection::visitCJump(IR::CJump *s)
return;
}
- RuntimeCall op;
- RuntimeCall opContext;
+ typename JITAssembler::RuntimeCall op;
+ typename JITAssembler::RuntimeCall opContext;
const char *opName = 0;
bool needsExceptionCheck;
switch (b->op) {
@@ -1321,165 +1292,30 @@ void InstructionSelection::visitCJump(IR::CJump *s)
// elimination (which isn't there either) would remove the whole else block.
if (opContext.isValid())
_as->generateFunctionCallImp(needsExceptionCheck,
- Assembler::ReturnValueRegister, opName, opContext,
- Assembler::EngineRegister,
- Assembler::PointerToValue(b->left),
- Assembler::PointerToValue(b->right));
+ JITTargetPlatform::ReturnValueRegister, opName, opContext,
+ JITTargetPlatform::EngineRegister,
+ PointerToValue(b->left),
+ PointerToValue(b->right));
else
_as->generateFunctionCallImp(needsExceptionCheck,
- Assembler::ReturnValueRegister, opName, op,
- Assembler::PointerToValue(b->left),
- Assembler::PointerToValue(b->right));
+ JITTargetPlatform::ReturnValueRegister, opName, op,
+ PointerToValue(b->left),
+ PointerToValue(b->right));
- _as->generateCJumpOnNonZero(Assembler::ReturnValueRegister, _block, s->iftrue, s->iffalse);
+ _as->generateCJumpOnNonZero(JITTargetPlatform::ReturnValueRegister, _block, s->iftrue, s->iffalse);
return;
}
Q_UNREACHABLE();
}
-void InstructionSelection::visitRet(IR::Ret *s)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::visitRet(IR::Ret *s)
{
- if (!s) {
- // this only happens if the method doesn't have a return statement and can
- // only exit through an exception
- } else if (IR::Temp *t = s->expr->asTemp()) {
-#if CPU(X86) || CPU(ARM) || CPU(MIPS)
-
-# if CPU(X86)
- Assembler::RegisterID lowReg = JSC::X86Registers::eax;
- Assembler::RegisterID highReg = JSC::X86Registers::edx;
-# elif CPU(MIPS)
- Assembler::RegisterID lowReg = JSC::MIPSRegisters::v0;
- Assembler::RegisterID highReg = JSC::MIPSRegisters::v1;
-# else // CPU(ARM)
- Assembler::RegisterID lowReg = JSC::ARMRegisters::r0;
- Assembler::RegisterID highReg = JSC::ARMRegisters::r1;
-# endif
-
- if (t->kind == IR::Temp::PhysicalRegister) {
- switch (t->type) {
- case IR::DoubleType:
- _as->moveDoubleToInts((Assembler::FPRegisterID) t->index, lowReg, highReg);
- break;
- case IR::UInt32Type: {
- Assembler::RegisterID srcReg = (Assembler::RegisterID) t->index;
- Assembler::Jump intRange = _as->branch32(Assembler::GreaterThanOrEqual, srcReg, Assembler::TrustedImm32(0));
- _as->convertUInt32ToDouble(srcReg, Assembler::FPGpr0, Assembler::ReturnValueRegister);
- _as->moveDoubleToInts(Assembler::FPGpr0, lowReg, highReg);
- Assembler::Jump done = _as->jump();
- intRange.link(_as);
- _as->move(srcReg, lowReg);
- _as->move(Assembler::TrustedImm32(QV4::Value::Integer_Type_Internal), highReg);
- done.link(_as);
- } break;
- case IR::SInt32Type:
- _as->move((Assembler::RegisterID) t->index, lowReg);
- _as->move(Assembler::TrustedImm32(QV4::Value::Integer_Type_Internal), highReg);
- break;
- case IR::BoolType:
- _as->move((Assembler::RegisterID) t->index, lowReg);
- _as->move(Assembler::TrustedImm32(QV4::Value::Boolean_Type_Internal), highReg);
- break;
- default:
- Q_UNREACHABLE();
- }
- } else {
- Pointer addr = _as->loadAddress(Assembler::ScratchRegister, t);
- _as->load32(addr, lowReg);
- addr.offset += 4;
- _as->load32(addr, highReg);
- }
-#else
- if (t->kind == IR::Temp::PhysicalRegister) {
- if (t->type == IR::DoubleType) {
- _as->moveDoubleTo64((Assembler::FPRegisterID) t->index,
- Assembler::ReturnValueRegister);
- _as->move(Assembler::TrustedImm64(QV4::Value::NaNEncodeMask),
- Assembler::ScratchRegister);
- _as->xor64(Assembler::ScratchRegister, Assembler::ReturnValueRegister);
- } else if (t->type == IR::UInt32Type) {
- Assembler::RegisterID srcReg = (Assembler::RegisterID) t->index;
- Assembler::Jump intRange = _as->branch32(Assembler::GreaterThanOrEqual, srcReg, Assembler::TrustedImm32(0));
- _as->convertUInt32ToDouble(srcReg, Assembler::FPGpr0, Assembler::ReturnValueRegister);
- _as->moveDoubleTo64(Assembler::FPGpr0, Assembler::ReturnValueRegister);
- _as->move(Assembler::TrustedImm64(QV4::Value::NaNEncodeMask), Assembler::ScratchRegister);
- _as->xor64(Assembler::ScratchRegister, Assembler::ReturnValueRegister);
- Assembler::Jump done = _as->jump();
- intRange.link(_as);
- _as->zeroExtend32ToPtr(srcReg, Assembler::ReturnValueRegister);
- quint64 tag = QV4::Value::Integer_Type_Internal;
- _as->or64(Assembler::TrustedImm64(tag << 32),
- Assembler::ReturnValueRegister);
- done.link(_as);
- } else {
- _as->zeroExtend32ToPtr((Assembler::RegisterID) t->index, Assembler::ReturnValueRegister);
- quint64 tag;
- switch (t->type) {
- case IR::SInt32Type:
- tag = QV4::Value::Integer_Type_Internal;
- break;
- case IR::BoolType:
- tag = QV4::Value::Boolean_Type_Internal;
- break;
- default:
- tag = 31337; // bogus value
- Q_UNREACHABLE();
- }
- _as->or64(Assembler::TrustedImm64(tag << 32),
- Assembler::ReturnValueRegister);
- }
- } else {
- _as->copyValue(Assembler::ReturnValueRegister, t);
- }
-#endif
- } else if (IR::Const *c = s->expr->asConst()) {
- QV4::Primitive retVal = convertToValue(c);
-#if CPU(X86)
- _as->move(Assembler::TrustedImm32(retVal.int_32()), JSC::X86Registers::eax);
- _as->move(Assembler::TrustedImm32(retVal.tag()), JSC::X86Registers::edx);
-#elif CPU(ARM)
- _as->move(Assembler::TrustedImm32(retVal.int_32()), JSC::ARMRegisters::r0);
- _as->move(Assembler::TrustedImm32(retVal.tag()), JSC::ARMRegisters::r1);
-#elif CPU(MIPS)
- _as->move(Assembler::TrustedImm32(retVal.int_32()), JSC::MIPSRegisters::v0);
- _as->move(Assembler::TrustedImm32(retVal.tag()), JSC::MIPSRegisters::v1);
-#else
- _as->move(Assembler::TrustedImm64(retVal.rawValue()), Assembler::ReturnValueRegister);
-#endif
- } else {
- Q_UNREACHABLE();
- Q_UNUSED(s);
- }
-
- Assembler::Label leaveStackFrame = _as->label();
-
- const int locals = _as->stackLayout().calculateJSStackFrameSize();
- _as->subPtr(Assembler::TrustedImm32(sizeof(QV4::Value)*locals), Assembler::LocalsRegister);
- _as->loadPtr(Address(Assembler::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), Assembler::ScratchRegister);
- _as->loadPtr(Address(Assembler::ScratchRegister, qOffsetOf(ExecutionContext::Data, engine)), Assembler::ScratchRegister);
- _as->storePtr(Assembler::LocalsRegister, Address(Assembler::ScratchRegister, qOffsetOf(ExecutionEngine, jsStackTop)));
-
- _as->leaveStandardStackFrame(regularRegistersToSave, fpRegistersToSave);
- _as->ret();
-
- _as->exceptionReturnLabel = _as->label();
- QV4::Primitive retVal = Primitive::undefinedValue();
-#if CPU(X86)
- _as->move(Assembler::TrustedImm32(retVal.int_32()), JSC::X86Registers::eax);
- _as->move(Assembler::TrustedImm32(retVal.tag()), JSC::X86Registers::edx);
-#elif CPU(ARM)
- _as->move(Assembler::TrustedImm32(retVal.int_32()), JSC::ARMRegisters::r0);
- _as->move(Assembler::TrustedImm32(retVal.tag()), JSC::ARMRegisters::r1);
-#elif CPU(MIPS)
- _as->move(Assembler::TrustedImm32(retVal.int_32()), JSC::MIPSRegisters::v0);
- _as->move(Assembler::TrustedImm32(retVal.tag()), JSC::MIPSRegisters::v1);
-#else
- _as->move(Assembler::TrustedImm64(retVal.rawValue()), Assembler::ReturnValueRegister);
-#endif
- _as->jump(leaveStackFrame);
+ _as->returnFromFunction(s, regularRegistersToSave, fpRegistersToSave);
}
-int InstructionSelection::prepareVariableArguments(IR::ExprList* args)
+template <typename JITAssembler>
+int InstructionSelection<JITAssembler>::prepareVariableArguments(IR::ExprList* args)
{
int argc = 0;
for (IR::ExprList *it = args; it; it = it->next) {
@@ -1492,7 +1328,7 @@ int InstructionSelection::prepareVariableArguments(IR::ExprList* args)
Q_ASSERT(arg != 0);
Pointer dst(_as->stackLayout().argumentAddressForCall(i));
if (arg->asTemp() && arg->asTemp()->kind != IR::Temp::PhysicalRegister)
- _as->memcopyValue(dst, arg->asTemp(), Assembler::ScratchRegister);
+ _as->memcopyValue(dst, arg->asTemp(), JITTargetPlatform::ScratchRegister);
else
_as->copyValue(dst, arg);
}
@@ -1500,7 +1336,8 @@ int InstructionSelection::prepareVariableArguments(IR::ExprList* args)
return argc;
}
-int InstructionSelection::prepareCallData(IR::ExprList* args, IR::Expr *thisObject)
+template <typename JITAssembler>
+int InstructionSelection<JITAssembler>::prepareCallData(IR::ExprList* args, IR::Expr *thisObject)
{
int argc = 0;
for (IR::ExprList *it = args; it; it = it->next) {
@@ -1508,9 +1345,9 @@ int InstructionSelection::prepareCallData(IR::ExprList* args, IR::Expr *thisObje
}
Pointer p = _as->stackLayout().callDataAddress(qOffsetOf(CallData, tag));
- _as->store32(Assembler::TrustedImm32(QV4::Value::Integer_Type_Internal), p);
+ _as->store32(TrustedImm32(QV4::Value::Integer_Type_Internal), p);
p = _as->stackLayout().callDataAddress(qOffsetOf(CallData, argc));
- _as->store32(Assembler::TrustedImm32(argc), p);
+ _as->store32(TrustedImm32(argc), p);
p = _as->stackLayout().callDataAddress(qOffsetOf(CallData, thisObject));
if (!thisObject)
_as->storeValue(QV4::Primitive::undefinedValue(), p);
@@ -1523,19 +1360,20 @@ int InstructionSelection::prepareCallData(IR::ExprList* args, IR::Expr *thisObje
Q_ASSERT(arg != 0);
Pointer dst(_as->stackLayout().argumentAddressForCall(i));
if (arg->asTemp() && arg->asTemp()->kind != IR::Temp::PhysicalRegister)
- _as->memcopyValue(dst, arg->asTemp(), Assembler::ScratchRegister);
+ _as->memcopyValue(dst, arg->asTemp(), JITTargetPlatform::ScratchRegister);
else
_as->copyValue(dst, arg);
}
return argc;
}
-void InstructionSelection::calculateRegistersToSave(const RegisterInformation &used)
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::calculateRegistersToSave(const RegisterInformation &used)
{
regularRegistersToSave.clear();
fpRegistersToSave.clear();
- for (const RegisterInfo &ri : Assembler::getRegisterInfo()) {
+ for (const RegisterInfo &ri : JITTargetPlatform::getRegisterInfo()) {
#if defined(RESTORE_EBX_ON_CALL)
if (ri.isRegularRegister() && ri.reg<JSC::X86Registers::RegisterID>() == JSC::X86Registers::ebx) {
regularRegistersToSave.append(ri);
@@ -1564,35 +1402,38 @@ bool operator==(const Primitive &v1, const Primitive &v2)
} // QV4 namespace
QT_END_NAMESPACE
-bool InstructionSelection::visitCJumpDouble(IR::AluOp op, IR::Expr *left, IR::Expr *right,
+template <typename JITAssembler>
+bool InstructionSelection<JITAssembler>::visitCJumpDouble(IR::AluOp op, IR::Expr *left, IR::Expr *right,
IR::BasicBlock *iftrue, IR::BasicBlock *iffalse)
{
if (_as->nextBlock() == iftrue) {
- Assembler::Jump target = _as->branchDouble(true, op, left, right);
+ Jump target = _as->branchDouble(true, op, left, right);
_as->addPatch(iffalse, target);
} else {
- Assembler::Jump target = _as->branchDouble(false, op, left, right);
+ Jump target = _as->branchDouble(false, op, left, right);
_as->addPatch(iftrue, target);
_as->jumpToBlock(_block, iffalse);
}
return true;
}
-bool InstructionSelection::visitCJumpSInt32(IR::AluOp op, IR::Expr *left, IR::Expr *right,
+template <typename JITAssembler>
+bool InstructionSelection<JITAssembler>::visitCJumpSInt32(IR::AluOp op, IR::Expr *left, IR::Expr *right,
IR::BasicBlock *iftrue, IR::BasicBlock *iffalse)
{
if (_as->nextBlock() == iftrue) {
- Assembler::Jump target = _as->branchInt32(true, op, left, right);
+ Jump target = _as->branchInt32(true, op, left, right);
_as->addPatch(iffalse, target);
} else {
- Assembler::Jump target = _as->branchInt32(false, op, left, right);
+ Jump target = _as->branchInt32(false, op, left, right);
_as->addPatch(iftrue, target);
_as->jumpToBlock(_block, iffalse);
}
return true;
}
-void InstructionSelection::visitCJumpStrict(IR::Binop *binop, IR::BasicBlock *trueBlock,
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::visitCJumpStrict(IR::Binop *binop, IR::BasicBlock *trueBlock,
IR::BasicBlock *falseBlock)
{
Q_ASSERT(binop->op == IR::OpStrictEqual || binop->op == IR::OpStrictNotEqual);
@@ -1607,15 +1448,16 @@ void InstructionSelection::visitCJumpStrict(IR::Binop *binop, IR::BasicBlock *tr
IR::Expr *left = binop->left;
IR::Expr *right = binop->right;
- generateRuntimeCall(Assembler::ReturnValueRegister, compareStrictEqual,
- Assembler::PointerToValue(left), Assembler::PointerToValue(right));
- _as->generateCJumpOnCompare(binop->op == IR::OpStrictEqual ? Assembler::NotEqual : Assembler::Equal,
- Assembler::ReturnValueRegister, Assembler::TrustedImm32(0),
+ generateRuntimeCall(_as, JITTargetPlatform::ReturnValueRegister, compareStrictEqual,
+ PointerToValue(left), PointerToValue(right));
+ _as->generateCJumpOnCompare(binop->op == IR::OpStrictEqual ? RelationalCondition::NotEqual : RelationalCondition::Equal,
+ JITTargetPlatform::ReturnValueRegister, TrustedImm32(0),
_block, trueBlock, falseBlock);
}
// Only load the non-null temp.
-bool InstructionSelection::visitCJumpStrictNull(IR::Binop *binop,
+template <typename JITAssembler>
+bool InstructionSelection<JITAssembler>::visitCJumpStrictNull(IR::Binop *binop,
IR::BasicBlock *trueBlock,
IR::BasicBlock *falseBlock)
{
@@ -1640,19 +1482,20 @@ bool InstructionSelection::visitCJumpStrictNull(IR::Binop *binop,
return true;
}
- Assembler::Pointer tagAddr = _as->loadAddress(Assembler::ScratchRegister, varSrc);
+ Pointer tagAddr = _as->loadAddress(JITTargetPlatform::ScratchRegister, varSrc);
tagAddr.offset += 4;
- const Assembler::RegisterID tagReg = Assembler::ScratchRegister;
+ const RegisterID tagReg = JITTargetPlatform::ScratchRegister;
_as->load32(tagAddr, tagReg);
- Assembler::RelationalCondition cond = binop->op == IR::OpStrictEqual ? Assembler::Equal
- : Assembler::NotEqual;
- const Assembler::TrustedImm32 tag(QV4::Value::Null_Type_Internal);
+ RelationalCondition cond = binop->op == IR::OpStrictEqual ? RelationalCondition::Equal
+ : RelationalCondition::NotEqual;
+ const TrustedImm32 tag(QV4::Value::Null_Type_Internal);
_as->generateCJumpOnCompare(cond, tagReg, tag, _block, trueBlock, falseBlock);
return true;
}
-bool InstructionSelection::visitCJumpStrictUndefined(IR::Binop *binop,
+template <typename JITAssembler>
+bool InstructionSelection<JITAssembler>::visitCJumpStrictUndefined(IR::Binop *binop,
IR::BasicBlock *trueBlock,
IR::BasicBlock *falseBlock)
{
@@ -1677,28 +1520,15 @@ bool InstructionSelection::visitCJumpStrictUndefined(IR::Binop *binop,
return true;
}
- Assembler::RelationalCondition cond = binop->op == IR::OpStrictEqual ? Assembler::Equal
- : Assembler::NotEqual;
- const Assembler::RegisterID tagReg = Assembler::ReturnValueRegister;
-#ifdef QV4_USE_64_BIT_VALUE_ENCODING
- Assembler::Pointer addr = _as->loadAddress(Assembler::ScratchRegister, varSrc);
- _as->load64(addr, tagReg);
- const Assembler::TrustedImm64 tag(0);
-#else // !QV4_USE_64_BIT_VALUE_ENCODING
- Assembler::Pointer tagAddr = _as->loadAddress(Assembler::ScratchRegister, varSrc);
- _as->load32(tagAddr, tagReg);
- Assembler::Jump j = _as->branch32(Assembler::invert(cond), tagReg, Assembler::TrustedImm32(0));
- _as->addPatch(falseBlock, j);
-
- tagAddr.offset += 4;
- _as->load32(tagAddr, tagReg);
- const Assembler::TrustedImm32 tag(QV4::Value::Managed_Type_Internal);
-#endif
- _as->generateCJumpOnCompare(cond, tagReg, tag, _block, trueBlock, falseBlock);
+ RelationalCondition cond = binop->op == IR::OpStrictEqual ? RelationalCondition::Equal
+ : RelationalCondition::NotEqual;
+ const RegisterID tagReg = JITTargetPlatform::ReturnValueRegister;
+ _as->generateCJumpOnUndefined(cond, varSrc, JITTargetPlatform::ScratchRegister, tagReg, _block, trueBlock, falseBlock);
return true;
}
-bool InstructionSelection::visitCJumpStrictBool(IR::Binop *binop, IR::BasicBlock *trueBlock,
+template <typename JITAssembler>
+bool InstructionSelection<JITAssembler>::visitCJumpStrictBool(IR::Binop *binop, IR::BasicBlock *trueBlock,
IR::BasicBlock *falseBlock)
{
IR::Expr *boolSrc = 0, *otherSrc = 0;
@@ -1720,12 +1550,12 @@ bool InstructionSelection::visitCJumpStrictBool(IR::Binop *binop, IR::BasicBlock
return false;
}
- Assembler::RelationalCondition cond = binop->op == IR::OpStrictEqual ? Assembler::Equal
- : Assembler::NotEqual;
+ RelationalCondition cond = binop->op == IR::OpStrictEqual ? RelationalCondition::Equal
+ : RelationalCondition::NotEqual;
if (otherSrc->type == IR::BoolType) { // both are boolean
- Assembler::RegisterID one = _as->toBoolRegister(boolSrc, Assembler::ReturnValueRegister);
- Assembler::RegisterID two = _as->toBoolRegister(otherSrc, Assembler::ScratchRegister);
+ RegisterID one = _as->toBoolRegister(boolSrc, JITTargetPlatform::ReturnValueRegister);
+ RegisterID two = _as->toBoolRegister(otherSrc, JITTargetPlatform::ScratchRegister);
_as->generateCJumpOnCompare(cond, one, two, _block, trueBlock, falseBlock);
return true;
}
@@ -1735,13 +1565,13 @@ bool InstructionSelection::visitCJumpStrictBool(IR::Binop *binop, IR::BasicBlock
return true;
}
- Assembler::Pointer otherAddr = _as->loadAddress(Assembler::ReturnValueRegister, otherSrc);
+ Pointer otherAddr = _as->loadAddress(JITTargetPlatform::ReturnValueRegister, otherSrc);
otherAddr.offset += 4; // tag address
    // check if the tag of the var operand indicates 'boolean'
- _as->load32(otherAddr, Assembler::ScratchRegister);
- Assembler::Jump noBool = _as->branch32(Assembler::NotEqual, Assembler::ScratchRegister,
- Assembler::TrustedImm32(QV4::Value::Boolean_Type_Internal));
+ _as->load32(otherAddr, JITTargetPlatform::ScratchRegister);
+ Jump noBool = _as->branch32(RelationalCondition::NotEqual, JITTargetPlatform::ScratchRegister,
+ TrustedImm32(QV4::Value::Boolean_Type_Internal));
if (binop->op == IR::OpStrictEqual)
_as->addPatch(falseBlock, noBool);
else
@@ -1749,14 +1579,15 @@ bool InstructionSelection::visitCJumpStrictBool(IR::Binop *binop, IR::BasicBlock
// ok, both are boolean, so let's load them and compare them.
otherAddr.offset -= 4; // int_32 address
- _as->load32(otherAddr, Assembler::ReturnValueRegister);
- Assembler::RegisterID boolReg = _as->toBoolRegister(boolSrc, Assembler::ScratchRegister);
- _as->generateCJumpOnCompare(cond, boolReg, Assembler::ReturnValueRegister, _block, trueBlock,
+ _as->load32(otherAddr, JITTargetPlatform::ReturnValueRegister);
+ RegisterID boolReg = _as->toBoolRegister(boolSrc, JITTargetPlatform::ScratchRegister);
+ _as->generateCJumpOnCompare(cond, boolReg, JITTargetPlatform::ReturnValueRegister, _block, trueBlock,
falseBlock);
return true;
}
-bool InstructionSelection::visitCJumpNullUndefined(IR::Type nullOrUndef, IR::Binop *binop,
+template <typename JITAssembler>
+bool InstructionSelection<JITAssembler>::visitCJumpNullUndefined(IR::Type nullOrUndef, IR::Binop *binop,
IR::BasicBlock *trueBlock,
IR::BasicBlock *falseBlock)
{
@@ -1783,18 +1614,18 @@ bool InstructionSelection::visitCJumpNullUndefined(IR::Type nullOrUndef, IR::Bin
return true;
}
- Assembler::Pointer tagAddr = _as->loadAddress(Assembler::ScratchRegister, varSrc);
+ Pointer tagAddr = _as->loadAddress(JITTargetPlatform::ScratchRegister, varSrc);
tagAddr.offset += 4;
- const Assembler::RegisterID tagReg = Assembler::ReturnValueRegister;
+ const RegisterID tagReg = JITTargetPlatform::ReturnValueRegister;
_as->load32(tagAddr, tagReg);
if (binop->op == IR::OpNotEqual)
qSwap(trueBlock, falseBlock);
- Assembler::Jump isNull = _as->branch32(Assembler::Equal, tagReg, Assembler::TrustedImm32(int(QV4::Value::Null_Type_Internal)));
- Assembler::Jump isNotUndefinedTag = _as->branch32(Assembler::NotEqual, tagReg, Assembler::TrustedImm32(int(QV4::Value::Managed_Type_Internal)));
+ Jump isNull = _as->branch32(RelationalCondition::Equal, tagReg, TrustedImm32(int(QV4::Value::Null_Type_Internal)));
+ Jump isNotUndefinedTag = _as->branch32(RelationalCondition::NotEqual, tagReg, TrustedImm32(int(QV4::Value::Managed_Type_Internal)));
tagAddr.offset -= 4;
_as->load32(tagAddr, tagReg);
- Assembler::Jump isNotUndefinedValue = _as->branch32(Assembler::NotEqual, tagReg, Assembler::TrustedImm32(0));
+ Jump isNotUndefinedValue = _as->branch32(RelationalCondition::NotEqual, tagReg, TrustedImm32(0));
_as->addPatch(trueBlock, isNull);
_as->addPatch(falseBlock, isNotUndefinedTag);
_as->addPatch(falseBlock, isNotUndefinedValue);
@@ -1804,7 +1635,8 @@ bool InstructionSelection::visitCJumpNullUndefined(IR::Type nullOrUndef, IR::Bin
}
-void InstructionSelection::visitCJumpEqual(IR::Binop *binop, IR::BasicBlock *trueBlock,
+template <typename JITAssembler>
+void InstructionSelection<JITAssembler>::visitCJumpEqual(IR::Binop *binop, IR::BasicBlock *trueBlock,
IR::BasicBlock *falseBlock)
{
Q_ASSERT(binop->op == IR::OpEqual || binop->op == IR::OpNotEqual);
@@ -1815,18 +1647,54 @@ void InstructionSelection::visitCJumpEqual(IR::Binop *binop, IR::BasicBlock *tru
IR::Expr *left = binop->left;
IR::Expr *right = binop->right;
- generateRuntimeCall(Assembler::ReturnValueRegister, compareEqual,
- Assembler::PointerToValue(left), Assembler::PointerToValue(right));
- _as->generateCJumpOnCompare(binop->op == IR::OpEqual ? Assembler::NotEqual : Assembler::Equal,
- Assembler::ReturnValueRegister, Assembler::TrustedImm32(0),
+ generateRuntimeCall(_as, JITTargetPlatform::ReturnValueRegister, compareEqual,
+ PointerToValue(left), PointerToValue(right));
+ _as->generateCJumpOnCompare(binop->op == IR::OpEqual ? RelationalCondition::NotEqual : RelationalCondition::Equal,
+ JITTargetPlatform::ReturnValueRegister, TrustedImm32(0),
_block, trueBlock, falseBlock);
}
-QQmlRefPointer<CompiledData::CompilationUnit> ISelFactory::createUnitForLoading()
+template <typename JITAssembler>
+QQmlRefPointer<CompiledData::CompilationUnit> ISelFactory<JITAssembler>::createUnitForLoading()
{
QQmlRefPointer<CompiledData::CompilationUnit> result;
result.adopt(new JIT::CompilationUnit);
return result;
}
+QT_BEGIN_NAMESPACE
+namespace QV4 { namespace JIT {
+template class Q_QML_EXPORT InstructionSelection<>;
+template class Q_QML_EXPORT ISelFactory<>;
+#if defined(V4_BOOTSTRAP) && CPU(X86_64)
+
+Q_QML_EXPORT QV4::EvalISelFactory *createISelForArchitecture(const QString &architecture)
+{
+ using ARMv7CrossAssembler = QV4::JIT::Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerARMv7, NoOperatingSystemSpecialization>>;
+
+ if (architecture == QLatin1String("armv7"))
+ return new ISelFactory<ARMv7CrossAssembler>;
+
+ QString hostArch;
+#if CPU(ARM_THUMB2)
+ hostArch = QStringLiteral("armv7");
+#elif CPU(ARM64)
+ hostArch = QStringLiteral("armv8");
+#elif CPU(MIPS)
+ hostArch = QStringLiteral("mips");
+#elif CPU(X86)
+ hostArch = QStringLiteral("x86");
+#elif CPU(X86_64)
+ hostArch = QStringLiteral("x86_64");
+#endif
+ if (!hostArch.isEmpty() && architecture == hostArch)
+ return new ISelFactory<>;
+
+ return nullptr;
+}
+
+#endif
+} }
+QT_END_NAMESPACE
+
#endif // ENABLE(ASSEMBLER)
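For reference, the template conversion above leaves InstructionSelection<> and ISelFactory<> resolving to the host backend through the Assembler<DefaultAssemblerTargetConfiguration> default argument, while the bootstrap-only createISelForArchitecture() hands out an ARMv7 cross-assembler. A minimal usage sketch follows (illustrative only, not part of this patch; pickFactory is a hypothetical helper and assumes the V4_BOOTSTRAP/x86_64 build in which createISelForArchitecture() is compiled in):

// Hypothetical helper, for illustration only -- not part of this patch.
static QV4::EvalISelFactory *pickFactory(const QString &targetArchitecture)
{
    // "armv7" yields ISelFactory<ARMv7CrossAssembler>; a name matching the
    // host architecture yields ISelFactory<> with the default assembler.
    if (QV4::EvalISelFactory *factory =
            QV4::JIT::createISelForArchitecture(targetArchitecture))
        return factory;

    return nullptr; // unknown or unsupported target architecture
}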
diff --git a/src/qml/jit/qv4isel_masm_p.h b/src/qml/jit/qv4isel_masm_p.h
index 012745c5f2..6ae50c3260 100644
--- a/src/qml/jit/qv4isel_masm_p.h
+++ b/src/qml/jit/qv4isel_masm_p.h
@@ -72,6 +72,7 @@ QT_BEGIN_NAMESPACE
namespace QV4 {
namespace JIT {
+template <typename JITAssembler = Assembler<DefaultAssemblerTargetConfiguration>>
class Q_QML_EXPORT InstructionSelection:
protected IR::IRDecoder,
public EvalInstructionSelection
@@ -136,8 +137,23 @@ protected:
void unop(IR::AluOp oper, IR::Expr *sourceTemp, IR::Expr *target) override;
void binop(IR::AluOp oper, IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target) override;
- typedef Assembler::Address Address;
- typedef Assembler::Pointer Pointer;
+ using Address = typename JITAssembler::Address;
+ using Pointer = typename JITAssembler::Pointer;
+ using PointerToValue = typename JITAssembler::PointerToValue;
+ using RegisterID = typename JITAssembler::RegisterID;
+ using FPRegisterID = typename JITAssembler::FPRegisterID;
+ using ResultCondition = typename JITAssembler::ResultCondition;
+ using TrustedImm32 = typename JITAssembler::TrustedImm32;
+ using TrustedImm64 = typename JITAssembler::TrustedImm64;
+ using Label = typename JITAssembler::Label;
+ using Jump = typename JITAssembler::Jump;
+ using StringToIndex = typename JITAssembler::StringToIndex;
+ using Reference = typename JITAssembler::Reference;
+ using RelationalCondition = typename JITAssembler::RelationalCondition;
+ using BranchTruncateType = typename JITAssembler::BranchTruncateType;
+ using RuntimeCall = typename JITAssembler::RuntimeCall;
+
+ using JITTargetPlatform = typename JITAssembler::JITTargetPlatform;
#if !defined(ARGUMENTS_IN_REGISTERS)
Address addressForArgument(int index) const
@@ -145,7 +161,7 @@ protected:
// FramePointerRegister points to its old value on the stack, and above
// it we have the return address, hence the need to step over two
// values before reaching the first argument.
- return Address(Assembler::FramePointerRegister, (index + 2) * sizeof(void*));
+ return Address(JITTargetPlatform::FramePointerRegister, (index + 2) * sizeof(void*));
}
#endif
@@ -192,61 +208,55 @@ private:
if (targetTemp->kind == IR::Temp::PhysicalRegister) {
if (IR::Temp *sourceTemp = source->asTemp()) {
if (sourceTemp->kind == IR::Temp::PhysicalRegister) {
- _as->convertInt32ToDouble((Assembler::RegisterID) sourceTemp->index,
- (Assembler::FPRegisterID) targetTemp->index);
+ _as->convertInt32ToDouble((RegisterID) sourceTemp->index,
+ (FPRegisterID) targetTemp->index);
} else {
- _as->convertInt32ToDouble(_as->loadAddress(Assembler::ReturnValueRegister, sourceTemp),
- (Assembler::FPRegisterID) targetTemp->index);
+ _as->convertInt32ToDouble(_as->loadAddress(JITTargetPlatform::ReturnValueRegister, sourceTemp),
+ (FPRegisterID) targetTemp->index);
}
} else {
- _as->convertInt32ToDouble(_as->toInt32Register(source, Assembler::ScratchRegister),
- (Assembler::FPRegisterID) targetTemp->index);
+ _as->convertInt32ToDouble(_as->toInt32Register(source, JITTargetPlatform::ScratchRegister),
+ (FPRegisterID) targetTemp->index);
}
return;
}
}
- _as->convertInt32ToDouble(_as->toInt32Register(source, Assembler::ScratchRegister),
- Assembler::FPGpr0);
- _as->storeDouble(Assembler::FPGpr0, _as->loadAddress(Assembler::ReturnValueRegister, target));
+ _as->convertInt32ToDouble(_as->toInt32Register(source, JITTargetPlatform::ScratchRegister),
+ JITTargetPlatform::FPGpr0);
+ _as->storeDouble(JITTargetPlatform::FPGpr0, _as->loadAddress(JITTargetPlatform::ReturnValueRegister, target));
}
void convertUIntToDouble(IR::Expr *source, IR::Expr *target)
{
- Assembler::RegisterID tmpReg = Assembler::ScratchRegister;
- Assembler::RegisterID reg = _as->toInt32Register(source, tmpReg);
+ RegisterID tmpReg = JITTargetPlatform::ScratchRegister;
+ RegisterID reg = _as->toInt32Register(source, tmpReg);
if (IR::Temp *targetTemp = target->asTemp()) {
if (targetTemp->kind == IR::Temp::PhysicalRegister) {
- _as->convertUInt32ToDouble(reg, (Assembler::FPRegisterID) targetTemp->index, tmpReg);
+ _as->convertUInt32ToDouble(reg, (FPRegisterID) targetTemp->index, tmpReg);
return;
}
}
_as->convertUInt32ToDouble(_as->toUInt32Register(source, tmpReg),
- Assembler::FPGpr0, tmpReg);
- _as->storeDouble(Assembler::FPGpr0, _as->loadAddress(tmpReg, target));
+ JITTargetPlatform::FPGpr0, tmpReg);
+ _as->storeDouble(JITTargetPlatform::FPGpr0, _as->loadAddress(tmpReg, target));
}
void convertIntToBool(IR::Expr *source, IR::Expr *target)
{
- Assembler::RegisterID reg = Assembler::ScratchRegister;
+ RegisterID reg = JITTargetPlatform::ScratchRegister;
if (IR::Temp *targetTemp = target->asTemp())
if (targetTemp->kind == IR::Temp::PhysicalRegister)
- reg = (Assembler::RegisterID) targetTemp->index;
+ reg = (RegisterID) targetTemp->index;
_as->move(_as->toInt32Register(source, reg), reg);
- _as->compare32(Assembler::NotEqual, reg, Assembler::TrustedImm32(0), reg);
+ _as->compare32(RelationalCondition::NotEqual, reg, TrustedImm32(0), reg);
_as->storeBool(reg, target);
}
- #define isel_stringIfyx(s) #s
- #define isel_stringIfy(s) isel_stringIfyx(s)
-
- #define generateRuntimeCall(t, function, ...) \
- _as->generateFunctionCallImp(Runtime::Method_##function##_NeedsExceptionCheck, t, "Runtime::" isel_stringIfy(function), RuntimeCall(qOffsetOf(QV4::Runtime, function)), __VA_ARGS__)
-
int prepareVariableArguments(IR::ExprList* args);
int prepareCallData(IR::ExprList* args, IR::Expr *thisObject);
@@ -259,22 +269,22 @@ private:
// goes into the same register as the return value (currently only ARM), the prepareCall
        // will combine loading the lookupAddr into the register and calculating the indirect call
// address.
- Assembler::Pointer lookupAddr(Assembler::ReturnValueRegister, index * sizeof(QV4::Lookup));
+ Pointer lookupAddr(JITTargetPlatform::ReturnValueRegister, index * sizeof(QV4::Lookup));
_as->generateFunctionCallImp(true, retval, "lookup getter/setter",
- LookupCall(lookupAddr, getterSetterOffset), lookupAddr,
+ typename JITAssembler::LookupCall(lookupAddr, getterSetterOffset), lookupAddr,
arg1, arg2, arg3);
}
template <typename Retval, typename Arg1, typename Arg2>
void generateLookupCall(Retval retval, uint index, uint getterSetterOffset, Arg1 arg1, Arg2 arg2)
{
- generateLookupCall(retval, index, getterSetterOffset, arg1, arg2, Assembler::VoidType());
+ generateLookupCall(retval, index, getterSetterOffset, arg1, arg2, typename JITAssembler::VoidType());
}
IR::BasicBlock *_block;
BitVector _removableJumps;
- Assembler* _as;
+ JITAssembler* _as;
QScopedPointer<CompilationUnit> compilationUnit;
QQmlEnginePrivate *qmlEngine;
@@ -282,13 +292,14 @@ private:
RegisterInformation fpRegistersToSave;
};
+template <typename JITAssembler = Assembler<DefaultAssemblerTargetConfiguration>>
class Q_QML_EXPORT ISelFactory: public EvalISelFactory
{
public:
ISelFactory() : EvalISelFactory(QStringLiteral("jit")) {}
virtual ~ISelFactory() {}
EvalInstructionSelection *create(QQmlEnginePrivate *qmlEngine, QV4::ExecutableAllocator *execAllocator, IR::Module *module, QV4::Compiler::JSUnitGenerator *jsGenerator) Q_DECL_OVERRIDE Q_DECL_FINAL
- { return new InstructionSelection(qmlEngine, execAllocator, module, jsGenerator, this); }
+ { return new InstructionSelection<JITAssembler>(qmlEngine, execAllocator, module, jsGenerator, this); }
bool jitCompileRegexps() const Q_DECL_OVERRIDE Q_DECL_FINAL
{ return true; }
QQmlRefPointer<CompiledData::CompilationUnit> createUnitForLoading() Q_DECL_OVERRIDE Q_DECL_FINAL;
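The block of using-aliases added above (Address, Pointer, TrustedImm32, Jump, RelationalCondition, ...) is what allows the .cpp hunks to drop the old Assembler:: prefix: each name is a dependent type pulled out of the JITAssembler template parameter once, instead of being spelled with typename at every use. A reduced sketch of that pattern is shown below (illustrative only; BackendA, BackendB and Frontend are made-up names, not Qt types):

// Illustration of the dependent-type-alias pattern; not part of this patch.
struct BackendA { using Imm32 = int;      static Imm32 make() { return 42; } };
struct BackendB { using Imm32 = unsigned; static Imm32 make() { return 42u; } };

template <typename Backend = BackendA>
class Frontend
{
    // Without this alias, every use below would read "typename Backend::Imm32".
    using Imm32 = typename Backend::Imm32;

public:
    Imm32 value() const { return Backend::make(); }
};

// Frontend<> picks BackendA by default, mirroring how InstructionSelection<>
// defaults to Assembler<DefaultAssemblerTargetConfiguration>.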
diff --git a/src/qml/jit/qv4regalloc.cpp b/src/qml/jit/qv4regalloc.cpp
index f2ae7e117a..d5da863ee0 100644
--- a/src/qml/jit/qv4regalloc.cpp
+++ b/src/qml/jit/qv4regalloc.cpp
@@ -1529,7 +1529,7 @@ static inline int indexOfRangeCoveringPosition(const LifeTimeInterval::Ranges &r
return -1;
}
-static inline int intersectionPosition(const LifeTimeInterval::Range &one, const LifeTimeInterval::Range &two)
+static inline int intersectionPosition(const LifeTimeIntervalRange &one, const LifeTimeIntervalRange &two)
{
if (one.covers(two.start))
return two.start;
@@ -1787,9 +1787,9 @@ int RegisterAllocator::nextIntersection(const LifeTimeInterval &current,
return -1;
for (int currentEnd = currentRanges.size(); currentIt < currentEnd; ++currentIt) {
- const LifeTimeInterval::Range currentRange = currentRanges.at(currentIt);
+ const LifeTimeIntervalRange currentRange = currentRanges.at(currentIt);
for (int anotherIt = anotherItStart, anotherEnd = anotherRanges.size(); anotherIt < anotherEnd; ++anotherIt) {
- const LifeTimeInterval::Range anotherRange = anotherRanges.at(anotherIt);
+ const LifeTimeIntervalRange anotherRange = anotherRanges.at(anotherIt);
if (anotherRange.start > currentRange.end)
break;
int intersectPos = intersectionPosition(currentRange, anotherRange);
diff --git a/src/qml/jit/qv4targetplatform_p.h b/src/qml/jit/qv4targetplatform_p.h
index 7e265258d5..1c29aa2a70 100644
--- a/src/qml/jit/qv4targetplatform_p.h
+++ b/src/qml/jit/qv4targetplatform_p.h
@@ -63,6 +63,11 @@ QT_BEGIN_NAMESPACE
namespace QV4 {
namespace JIT {
+enum TargetOperatingSystemSpecialization {
+ NoOperatingSystemSpecialization,
+ WindowsSpecialization
+};
+
// The TargetPlatform class describes how the stack and the registers work on a CPU+ABI combination.
//
// All combinations have a separate definition, guarded by #ifdefs. The exceptions are:
@@ -79,25 +84,37 @@ namespace JIT {
// a call, we add a load of it right before emitting the call instruction.
//
// NOTE: When adding a new architecture, do not forget to whitelist it in qv4global_p.h!
+template <typename PlatformAssembler, TargetOperatingSystemSpecialization specialization = NoOperatingSystemSpecialization>
class TargetPlatform
{
-public:
+};
+
#if CPU(X86) && (OS(LINUX) || OS(WINDOWS) || OS(QNX) || OS(FREEBSD) || defined(Q_OS_IOS))
- enum { RegAllocIsSupported = 1 };
+template <>
+class TargetPlatform<JSC::MacroAssemblerX86, NoOperatingSystemSpecialization>
+{
+public:
+ using PlatformAssembler = JSC::MacroAssemblerX86;
+ using RegisterID = PlatformAssembler::RegisterID;
+ using FPRegisterID = PlatformAssembler::FPRegisterID;
- static const JSC::MacroAssembler::RegisterID FramePointerRegister = JSC::X86Registers::ebp;
- static const JSC::MacroAssembler::RegisterID StackPointerRegister = JSC::X86Registers::esp;
- static const JSC::MacroAssembler::RegisterID LocalsRegister = JSC::X86Registers::edi;
- static const JSC::MacroAssembler::RegisterID EngineRegister = JSC::X86Registers::esi;
- static const JSC::MacroAssembler::RegisterID ReturnValueRegister = JSC::X86Registers::eax;
- static const JSC::MacroAssembler::RegisterID ScratchRegister = JSC::X86Registers::ecx;
- static const JSC::MacroAssembler::FPRegisterID FPGpr0 = JSC::X86Registers::xmm0;
- static const JSC::MacroAssembler::FPRegisterID FPGpr1 = JSC::X86Registers::xmm1;
+ enum { RegAllocIsSupported = 1 };
- static RegisterInformation getPlatformRegisterInfo()
+ static const RegisterID FramePointerRegister = JSC::X86Registers::ebp;
+ static const RegisterID StackPointerRegister = JSC::X86Registers::esp;
+ static const RegisterID LocalsRegister = JSC::X86Registers::edi;
+ static const RegisterID EngineRegister = JSC::X86Registers::esi;
+ static const RegisterID ReturnValueRegister = JSC::X86Registers::eax;
+ static const RegisterID ScratchRegister = JSC::X86Registers::ecx;
+ static const FPRegisterID FPGpr0 = JSC::X86Registers::xmm0;
+ static const FPRegisterID FPGpr1 = JSC::X86Registers::xmm1;
+ static const RegisterID LowReturnValueRegister = JSC::X86Registers::eax;
+ static const RegisterID HighReturnValueRegister = JSC::X86Registers::edx;
+
+ static RegisterInformation getRegisterInfo()
{
typedef RegisterInfo RI;
- return RegisterInformation()
+ static RegisterInformation info = RegisterInformation()
<< RI(JSC::X86Registers::edx, QStringLiteral("edx"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
<< RI(JSC::X86Registers::ebx, QStringLiteral("ebx"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
<< RI(JSC::X86Registers::edi, QStringLiteral("edi"), RI::RegularRegister, RI::CalleeSaved, RI::Predefined)
@@ -109,6 +126,7 @@ public:
<< RI(JSC::X86Registers::xmm6, QStringLiteral("xmm6"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
<< RI(JSC::X86Registers::xmm7, QStringLiteral("xmm7"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
;
+ return info;
}
# define HAVE_ALU_OPS_WITH_MEM_OPERAND 1
@@ -117,19 +135,20 @@ public:
# undef ARGUMENTS_IN_REGISTERS
static const int RegisterArgumentCount = 0;
- static JSC::MacroAssembler::RegisterID registerForArgument(int) { Q_UNREACHABLE(); }
+ static RegisterID registerForArgument(int) { Q_UNREACHABLE(); }
static const int StackAlignment = 16;
static const int StackShadowSpace = 0;
static const int StackSpaceAllocatedUponFunctionEntry = RegisterSize; // Return address is pushed onto stack by the CPU.
- static void platformEnterStandardStackFrame(JSC::MacroAssembler *as) { as->push(FramePointerRegister); }
- static void platformLeaveStandardStackFrame(JSC::MacroAssembler *as) { as->pop(FramePointerRegister); }
+ static void platformEnterStandardStackFrame(PlatformAssembler *as) { as->push(FramePointerRegister); }
+ static void platformLeaveStandardStackFrame(PlatformAssembler *as) { as->pop(FramePointerRegister); }
#if OS(WINDOWS) || OS(QNX) || \
((OS(LINUX) || OS(FREEBSD)) && (defined(__PIC__) || defined(__PIE__)))
#define RESTORE_EBX_ON_CALL
- static JSC::MacroAssembler::Address ebxAddressOnStack()
+ using Address = PlatformAssembler::Address;
+ static Address ebxAddressOnStack()
{
static int ebxIdx = -1;
if (ebxIdx == -1) {
@@ -146,28 +165,36 @@ public:
Q_ASSERT(ebxIdx >= 0);
ebxIdx += 1;
}
- return JSC::MacroAssembler::Address(FramePointerRegister, ebxIdx * -int(sizeof(void*)));
+ return Address(FramePointerRegister, ebxIdx * -int(sizeof(void*)));
}
#endif
-
-#endif // Windows on x86
+};
+#endif // x86
#if CPU(X86_64) && (OS(LINUX) || OS(MAC_OS_X) || OS(FREEBSD) || OS(QNX) || defined(Q_OS_IOS))
+template <>
+class TargetPlatform<JSC::MacroAssemblerX86_64, NoOperatingSystemSpecialization>
+{
+public:
+ using PlatformAssembler = JSC::MacroAssemblerX86_64;
+ using RegisterID = PlatformAssembler::RegisterID;
+ using FPRegisterID = PlatformAssembler::FPRegisterID;
+
enum { RegAllocIsSupported = 1 };
- static const JSC::MacroAssembler::RegisterID FramePointerRegister = JSC::X86Registers::ebp;
- static const JSC::MacroAssembler::RegisterID StackPointerRegister = JSC::X86Registers::esp;
- static const JSC::MacroAssembler::RegisterID LocalsRegister = JSC::X86Registers::r12;
- static const JSC::MacroAssembler::RegisterID EngineRegister = JSC::X86Registers::r14;
- static const JSC::MacroAssembler::RegisterID ReturnValueRegister = JSC::X86Registers::eax;
- static const JSC::MacroAssembler::RegisterID ScratchRegister = JSC::X86Registers::r10;
- static const JSC::MacroAssembler::FPRegisterID FPGpr0 = JSC::X86Registers::xmm0;
- static const JSC::MacroAssembler::FPRegisterID FPGpr1 = JSC::X86Registers::xmm1;
+ static const RegisterID FramePointerRegister = JSC::X86Registers::ebp;
+ static const RegisterID StackPointerRegister = JSC::X86Registers::esp;
+ static const RegisterID LocalsRegister = JSC::X86Registers::r12;
+ static const RegisterID EngineRegister = JSC::X86Registers::r14;
+ static const RegisterID ReturnValueRegister = JSC::X86Registers::eax;
+ static const RegisterID ScratchRegister = JSC::X86Registers::r10;
+ static const FPRegisterID FPGpr0 = JSC::X86Registers::xmm0;
+ static const FPRegisterID FPGpr1 = JSC::X86Registers::xmm1;
- static RegisterInformation getPlatformRegisterInfo()
+ static RegisterInformation getRegisterInfo()
{
typedef RegisterInfo RI;
- return RegisterInformation()
+ static RegisterInformation info = RegisterInformation()
<< RI(JSC::X86Registers::ebx, QStringLiteral("rbx"), RI::RegularRegister, RI::CalleeSaved, RI::RegAlloc)
<< RI(JSC::X86Registers::edi, QStringLiteral("rdi"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
<< RI(JSC::X86Registers::esi, QStringLiteral("rsi"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
@@ -185,6 +212,7 @@ public:
<< RI(JSC::X86Registers::xmm6, QStringLiteral("xmm6"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
<< RI(JSC::X86Registers::xmm7, QStringLiteral("xmm7"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
;
+ return info;
}
#define HAVE_ALU_OPS_WITH_MEM_OPERAND 1
@@ -193,9 +221,9 @@ public:
#define ARGUMENTS_IN_REGISTERS
static const int RegisterArgumentCount = 6;
- static JSC::MacroAssembler::RegisterID registerForArgument(int index)
+ static RegisterID registerForArgument(int index)
{
- static JSC::MacroAssembler::RegisterID regs[RegisterArgumentCount] = {
+ static RegisterID regs[RegisterArgumentCount] = {
JSC::X86Registers::edi,
JSC::X86Registers::esi,
JSC::X86Registers::edx,
@@ -210,29 +238,38 @@ public:
static const int StackAlignment = 16;
static const int StackShadowSpace = 0;
static const int StackSpaceAllocatedUponFunctionEntry = RegisterSize; // Return address is pushed onto stack by the CPU.
- static void platformEnterStandardStackFrame(JSC::MacroAssembler *as) { as->push(FramePointerRegister); }
- static void platformLeaveStandardStackFrame(JSC::MacroAssembler *as) { as->pop(FramePointerRegister); }
+ static void platformEnterStandardStackFrame(PlatformAssembler *as) { as->push(FramePointerRegister); }
+ static void platformLeaveStandardStackFrame(PlatformAssembler *as) { as->pop(FramePointerRegister); }
+};
#endif // Linux/MacOS on x86_64
#if CPU(X86_64) && OS(WINDOWS)
+template <>
+class TargetPlatform<JSC::MacroAssemblerX86_64, WindowsSpecialization>
+{
+public:
+ using PlatformAssembler = JSC::MacroAssemblerX86_64;
+ using RegisterID = PlatformAssembler::RegisterID;
+ using FPRegisterID = PlatformAssembler::FPRegisterID;
+
// Register allocation is not (yet) supported on win64, because the ABI related stack handling
// is not completely implemented. Specifically, the saving of xmm registers, and the saving of
// incoming function parameters to the shadow space is missing.
enum { RegAllocIsSupported = 0 };
- static const JSC::MacroAssembler::RegisterID FramePointerRegister = JSC::X86Registers::ebp;
- static const JSC::MacroAssembler::RegisterID StackPointerRegister = JSC::X86Registers::esp;
- static const JSC::MacroAssembler::RegisterID LocalsRegister = JSC::X86Registers::r12;
- static const JSC::MacroAssembler::RegisterID EngineRegister = JSC::X86Registers::r14;
- static const JSC::MacroAssembler::RegisterID ReturnValueRegister = JSC::X86Registers::eax;
- static const JSC::MacroAssembler::RegisterID ScratchRegister = JSC::X86Registers::r10;
- static const JSC::MacroAssembler::FPRegisterID FPGpr0 = JSC::X86Registers::xmm0;
- static const JSC::MacroAssembler::FPRegisterID FPGpr1 = JSC::X86Registers::xmm1;
+ static const RegisterID FramePointerRegister = JSC::X86Registers::ebp;
+ static const RegisterID StackPointerRegister = JSC::X86Registers::esp;
+ static const RegisterID LocalsRegister = JSC::X86Registers::r12;
+ static const RegisterID EngineRegister = JSC::X86Registers::r14;
+ static const RegisterID ReturnValueRegister = JSC::X86Registers::eax;
+ static const RegisterID ScratchRegister = JSC::X86Registers::r10;
+ static const FPRegisterID FPGpr0 = JSC::X86Registers::xmm0;
+ static const FPRegisterID FPGpr1 = JSC::X86Registers::xmm1;
- static RegisterInformation getPlatformRegisterInfo()
+ static RegisterInformation getRegisterInfo()
{
typedef RegisterInfo RI;
- return RegisterInformation()
+ static RegisterInformation info = RegisterInformation()
<< RI(JSC::X86Registers::ebx, QStringLiteral("rbx"), RI::RegularRegister, RI::CalleeSaved, RI::Predefined)
<< RI(JSC::X86Registers::edi, QStringLiteral("rdi"), RI::RegularRegister, RI::CalleeSaved, RI::Predefined)
<< RI(JSC::X86Registers::esi, QStringLiteral("rsi"), RI::RegularRegister, RI::CalleeSaved, RI::Predefined)
@@ -244,6 +281,7 @@ public:
<< RI(JSC::X86Registers::r14, QStringLiteral("r14"), RI::RegularRegister, RI::CalleeSaved, RI::Predefined)
<< RI(JSC::X86Registers::r15, QStringLiteral("r15"), RI::RegularRegister, RI::CalleeSaved, RI::Predefined)
;
+ return info;
}
#define HAVE_ALU_OPS_WITH_MEM_OPERAND 1
@@ -252,9 +290,9 @@ public:
#define ARGUMENTS_IN_REGISTERS
static const int RegisterArgumentCount = 4;
- static JSC::MacroAssembler::RegisterID registerForArgument(int index)
+ static RegisterID registerForArgument(int index)
{
- static JSC::MacroAssembler::RegisterID regs[RegisterArgumentCount] = {
+ static RegisterID regs[RegisterArgumentCount] = {
JSC::X86Registers::ecx,
JSC::X86Registers::edx,
JSC::X86Registers::r8,
@@ -267,11 +305,20 @@ public:
static const int StackAlignment = 16;
static const int StackShadowSpace = 32;
static const int StackSpaceAllocatedUponFunctionEntry = RegisterSize; // Return address is pushed onto stack by the CPU.
- static void platformEnterStandardStackFrame(JSC::MacroAssembler *as) { as->push(FramePointerRegister); }
- static void platformLeaveStandardStackFrame(JSC::MacroAssembler *as) { as->pop(FramePointerRegister); }
+ static void platformEnterStandardStackFrame(PlatformAssembler *as) { as->push(FramePointerRegister); }
+ static void platformLeaveStandardStackFrame(PlatformAssembler *as) { as->pop(FramePointerRegister); }
+};
#endif // Windows on x86_64
-#if CPU(ARM)
+#if CPU(ARM) || defined(V4_BOOTSTRAP)
+template <>
+class TargetPlatform<JSC::MacroAssemblerARMv7, NoOperatingSystemSpecialization>
+{
+public:
+ using PlatformAssembler = JSC::MacroAssemblerARMv7;
+ using RegisterID = PlatformAssembler::RegisterID;
+ using FPRegisterID = PlatformAssembler::FPRegisterID;
+
enum { RegAllocIsSupported = 1 };
// The AAPCS specifies that the platform ABI has to define the usage of r9. Known are:
@@ -287,23 +334,25 @@ public:
// is used for the subroutine: r7 for Thumb or Thumb2, and r11 for ARM. We assign the constants
// accordingly, and assign the locals-register to the "other" register.
#if CPU(ARM_THUMB2)
- static const JSC::MacroAssembler::RegisterID FramePointerRegister = JSC::ARMRegisters::r7;
- static const JSC::MacroAssembler::RegisterID LocalsRegister = JSC::ARMRegisters::r11;
+ static const RegisterID FramePointerRegister = JSC::ARMRegisters::r7;
+ static const RegisterID LocalsRegister = JSC::ARMRegisters::r11;
#else // Thumbs down
- static const JSC::MacroAssembler::RegisterID FramePointerRegister = JSC::ARMRegisters::r11;
- static const JSC::MacroAssembler::RegisterID LocalsRegister = JSC::ARMRegisters::r7;
+ static const RegisterID FramePointerRegister = JSC::ARMRegisters::r11;
+ static const RegisterID LocalsRegister = JSC::ARMRegisters::r7;
#endif
- static const JSC::MacroAssembler::RegisterID StackPointerRegister = JSC::ARMRegisters::r13;
- static const JSC::MacroAssembler::RegisterID ScratchRegister = JSC::ARMRegisters::r5;
- static const JSC::MacroAssembler::RegisterID EngineRegister = JSC::ARMRegisters::r10;
- static const JSC::MacroAssembler::RegisterID ReturnValueRegister = JSC::ARMRegisters::r0;
- static const JSC::MacroAssembler::FPRegisterID FPGpr0 = JSC::ARMRegisters::d0;
- static const JSC::MacroAssembler::FPRegisterID FPGpr1 = JSC::ARMRegisters::d1;
-
- static RegisterInformation getPlatformRegisterInfo()
+ static const RegisterID StackPointerRegister = JSC::ARMRegisters::r13;
+ static const RegisterID ScratchRegister = JSC::ARMRegisters::r5;
+ static const RegisterID EngineRegister = JSC::ARMRegisters::r10;
+ static const RegisterID ReturnValueRegister = JSC::ARMRegisters::r0;
+ static const FPRegisterID FPGpr0 = JSC::ARMRegisters::d0;
+ static const FPRegisterID FPGpr1 = JSC::ARMRegisters::d1;
+ static const RegisterID LowReturnValueRegister = JSC::ARMRegisters::r0;
+ static const RegisterID HighReturnValueRegister = JSC::ARMRegisters::r1;
+
+ static RegisterInformation getRegisterInfo()
{
typedef RegisterInfo RI;
- return RegisterInformation()
+ static RegisterInformation info = RegisterInformation()
<< RI(JSC::ARMRegisters::r0, QStringLiteral("r0"), RI::RegularRegister, RI::CallerSaved, RI::Predefined)
<< RI(JSC::ARMRegisters::r1, QStringLiteral("r1"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
<< RI(JSC::ARMRegisters::r2, QStringLiteral("r2"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
@@ -336,6 +385,7 @@ public:
<< RI(JSC::ARMRegisters::d14, QStringLiteral("d14"), RI::FloatingPointRegister, RI::CalleeSaved, RI::RegAlloc)
<< RI(JSC::ARMRegisters::d15, QStringLiteral("d15"), RI::FloatingPointRegister, RI::CalleeSaved, RI::RegAlloc)
;
+ return info;
}
#undef HAVE_ALU_OPS_WITH_MEM_OPERAND
@@ -344,9 +394,9 @@ public:
#define ARGUMENTS_IN_REGISTERS
static const int RegisterArgumentCount = 4;
- static JSC::MacroAssembler::RegisterID registerForArgument(int index)
+ static RegisterID registerForArgument(int index)
{
- static JSC::MacroAssembler::RegisterID regs[RegisterArgumentCount] = {
+ static RegisterID regs[RegisterArgumentCount] = {
JSC::ARMRegisters::r0,
JSC::ARMRegisters::r1,
JSC::ARMRegisters::r2,
@@ -361,35 +411,44 @@ public:
static const int StackShadowSpace = 0;
static const int StackSpaceAllocatedUponFunctionEntry = 1 * RegisterSize; // Registers saved in platformEnterStandardStackFrame below.
- static void platformEnterStandardStackFrame(JSC::MacroAssembler *as)
+ static void platformEnterStandardStackFrame(PlatformAssembler *as)
{
as->push(JSC::ARMRegisters::lr);
as->push(FramePointerRegister);
}
- static void platformLeaveStandardStackFrame(JSC::MacroAssembler *as)
+ static void platformLeaveStandardStackFrame(PlatformAssembler *as)
{
as->pop(FramePointerRegister);
as->pop(JSC::ARMRegisters::lr);
}
+};
#endif // ARM (32 bit)
#if CPU(ARM64)
+template <>
+class TargetPlatform<JSC::MacroAssemblerARM64, NoOperatingSystemSpecialization>
+{
+public:
+ using PlatformAssembler = JSC::MacroAssemblerARM64;
+ using RegisterID = PlatformAssembler::RegisterID;
+ using FPRegisterID = PlatformAssembler::FPRegisterID;
+
enum { RegAllocIsSupported = 1 };
- static const JSC::MacroAssembler::RegisterID FramePointerRegister = JSC::ARM64Registers::fp;
- static const JSC::MacroAssembler::RegisterID LocalsRegister = JSC::ARM64Registers::x28;
- static const JSC::MacroAssembler::RegisterID StackPointerRegister = JSC::ARM64Registers::sp;
- static const JSC::MacroAssembler::RegisterID ScratchRegister = JSC::ARM64Registers::x9;
- static const JSC::MacroAssembler::RegisterID EngineRegister = JSC::ARM64Registers::x27;
- static const JSC::MacroAssembler::RegisterID ReturnValueRegister = JSC::ARM64Registers::x0;
- static const JSC::MacroAssembler::FPRegisterID FPGpr0 = JSC::ARM64Registers::q0;
- static const JSC::MacroAssembler::FPRegisterID FPGpr1 = JSC::ARM64Registers::q1;
+ static const RegisterID FramePointerRegister = JSC::ARM64Registers::fp;
+ static const RegisterID LocalsRegister = JSC::ARM64Registers::x28;
+ static const RegisterID StackPointerRegister = JSC::ARM64Registers::sp;
+ static const RegisterID ScratchRegister = JSC::ARM64Registers::x9;
+ static const RegisterID EngineRegister = JSC::ARM64Registers::x27;
+ static const RegisterID ReturnValueRegister = JSC::ARM64Registers::x0;
+ static const FPRegisterID FPGpr0 = JSC::ARM64Registers::q0;
+ static const FPRegisterID FPGpr1 = JSC::ARM64Registers::q1;
- static RegisterInformation getPlatformRegisterInfo()
+ static RegisterInformation getRegisterInfo()
{
typedef RegisterInfo RI;
- return RegisterInformation()
+ static RegisterInformation info = RegisterInformation()
<< RI(JSC::ARM64Registers::x0, QStringLiteral("x0"), RI::RegularRegister, RI::CallerSaved, RI::Predefined)
<< RI(JSC::ARM64Registers::x1, QStringLiteral("x1"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
<< RI(JSC::ARM64Registers::x2, QStringLiteral("x2"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
@@ -447,6 +506,7 @@ public:
<< RI(JSC::ARM64Registers::q30, QStringLiteral("q30"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
<< RI(JSC::ARM64Registers::q31, QStringLiteral("q31"), RI::FloatingPointRegister, RI::CallerSaved, RI::RegAlloc)
;
+ return info;
}
#undef HAVE_ALU_OPS_WITH_MEM_OPERAND
@@ -455,9 +515,9 @@ public:
#define ARGUMENTS_IN_REGISTERS
static const int RegisterArgumentCount = 8;
- static JSC::MacroAssembler::RegisterID registerForArgument(int index)
+ static RegisterID registerForArgument(int index)
{
- static JSC::MacroAssembler::RegisterID regs[RegisterArgumentCount] = {
+ static RegisterID regs[RegisterArgumentCount] = {
JSC::ARM64Registers::x0,
JSC::ARM64Registers::x1,
JSC::ARM64Registers::x2,
@@ -476,33 +536,43 @@ public:
static const int StackShadowSpace = 0;
static const int StackSpaceAllocatedUponFunctionEntry = 1 * RegisterSize; // Registers saved in platformEnterStandardStackFrame below.
- static void platformEnterStandardStackFrame(JSC::MacroAssembler *as)
+ static void platformEnterStandardStackFrame(PlatformAssembler *as)
{
as->pushPair(FramePointerRegister, JSC::ARM64Registers::lr);
}
- static void platformLeaveStandardStackFrame(JSC::MacroAssembler *as)
+ static void platformLeaveStandardStackFrame(PlatformAssembler *as)
{
as->popPair(FramePointerRegister, JSC::ARM64Registers::lr);
}
+};
#endif // ARM64
#if defined(Q_PROCESSOR_MIPS_32) && defined(Q_OS_LINUX)
+template <>
+class TargetPlatform<JSC::MacroAssemblerMIPS, NoOperatingSystemSpecialization>
+{
+public:
+ using PlatformAssembler = JSC::MacroAssemblerMIPS;
+ using RegisterID = PlatformAssembler::RegisterID;
+ using FPRegisterID = PlatformAssembler::FPRegisterID;
enum { RegAllocIsSupported = 1 };
- static const JSC::MacroAssembler::RegisterID FramePointerRegister = JSC::MIPSRegisters::fp;
- static const JSC::MacroAssembler::RegisterID StackPointerRegister = JSC::MIPSRegisters::sp;
- static const JSC::MacroAssembler::RegisterID LocalsRegister = JSC::MIPSRegisters::s0;
- static const JSC::MacroAssembler::RegisterID EngineRegister = JSC::MIPSRegisters::s1;
- static const JSC::MacroAssembler::RegisterID ReturnValueRegister = JSC::MIPSRegisters::v0;
- static const JSC::MacroAssembler::RegisterID ScratchRegister = JSC::MIPSRegisters::s2;
- static const JSC::MacroAssembler::FPRegisterID FPGpr0 = JSC::MIPSRegisters::f0;
- static const JSC::MacroAssembler::FPRegisterID FPGpr1 = JSC::MIPSRegisters::f2;
-
- static RegisterInformation getPlatformRegisterInfo()
+ static const RegisterID FramePointerRegister = JSC::MIPSRegisters::fp;
+ static const RegisterID StackPointerRegister = JSC::MIPSRegisters::sp;
+ static const RegisterID LocalsRegister = JSC::MIPSRegisters::s0;
+ static const RegisterID EngineRegister = JSC::MIPSRegisters::s1;
+ static const RegisterID ReturnValueRegister = JSC::MIPSRegisters::v0;
+ static const RegisterID ScratchRegister = JSC::MIPSRegisters::s2;
+ static const FPRegisterID FPGpr0 = JSC::MIPSRegisters::f0;
+ static const FPRegisterID FPGpr1 = JSC::MIPSRegisters::f2;
+ static const RegisterID LowReturnValueRegister = JSC::MIPSRegisters::v0;
+ static const RegisterID HighReturnValueRegister = JSC::MIPSRegisters::v1;
+
+ static RegisterInformation getRegisterInfo()
{
typedef RegisterInfo RI;
- return RegisterInformation()
+ static RegisterInformation info = RegisterInformation()
// Note: t0, t1, t2, t3 and f16 are already used by MacroAssemblerMIPS.
<< RI(JSC::MIPSRegisters::t4, QStringLiteral("t4"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
<< RI(JSC::MIPSRegisters::t5, QStringLiteral("t5"), RI::RegularRegister, RI::CallerSaved, RI::RegAlloc)
@@ -524,6 +594,7 @@ public:
<< RI(JSC::MIPSRegisters::f26, QStringLiteral("f26"), RI::FloatingPointRegister, RI::CalleeSaved, RI::RegAlloc)
<< RI(JSC::MIPSRegisters::f28, QStringLiteral("f28"), RI::FloatingPointRegister, RI::CalleeSaved, RI::RegAlloc)
;
+ return info;
}
#undef HAVE_ALU_OPS_WITH_MEM_OPERAND
@@ -532,9 +603,9 @@ public:
#define ARGUMENTS_IN_REGISTERS
static const int RegisterArgumentCount = 4;
- static JSC::MacroAssembler::RegisterID registerForArgument(int index)
+ static RegisterID registerForArgument(int index)
{
- static JSC::MacroAssembler::RegisterID regs[RegisterArgumentCount] = {
+ static RegisterID regs[RegisterArgumentCount] = {
JSC::MIPSRegisters::a0,
JSC::MIPSRegisters::a1,
JSC::MIPSRegisters::a2,
@@ -549,27 +620,19 @@ public:
static const int StackShadowSpace = 4 * RegisterSize; // Stack space for 4 argument registers.
static const int StackSpaceAllocatedUponFunctionEntry = 1 * RegisterSize; // Registers saved in platformEnterStandardStackFrame below.
- static void platformEnterStandardStackFrame(JSC::MacroAssembler *as)
+ static void platformEnterStandardStackFrame(PlatformAssembler *as)
{
as->push(JSC::MIPSRegisters::ra);
as->push(FramePointerRegister);
}
- static void platformLeaveStandardStackFrame(JSC::MacroAssembler *as)
+ static void platformLeaveStandardStackFrame(PlatformAssembler *as)
{
as->pop(FramePointerRegister);
as->pop(JSC::MIPSRegisters::ra);
}
-#endif // Linux on MIPS (32 bit)
-
-public: // utility functions
- static const RegisterInformation getRegisterInfo()
- {
- static const RegisterInformation info = getPlatformRegisterInfo();
-
- return info;
- }
};
+#endif // Linux on MIPS (32 bit)
} // JIT namespace
} // QV4 namespace
diff --git a/src/qml/jit/qv4unop.cpp b/src/qml/jit/qv4unop.cpp
index 799103849b..31355e5dce 100644
--- a/src/qml/jit/qv4unop.cpp
+++ b/src/qml/jit/qv4unop.cpp
@@ -48,14 +48,15 @@ using namespace JIT;
#define stringIfy(s) stringIfyx(s)
#define setOp(operation) \
do { \
- call = RuntimeCall(qOffsetOf(QV4::Runtime, operation)); name = "Runtime::" stringIfy(operation); \
+ call = typename JITAssembler::RuntimeCall(qOffsetOf(QV4::Runtime, operation)); name = "Runtime::" stringIfy(operation); \
needsExceptionCheck = Runtime::Method_##operation##_NeedsExceptionCheck; \
} while (0)
-void Unop::generate(IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void Unop<JITAssembler>::generate(IR::Expr *source, IR::Expr *target)
{
bool needsExceptionCheck;
- RuntimeCall call;
+ typename JITAssembler::RuntimeCall call;
const char *name = 0;
switch (op) {
case IR::OpNot:
@@ -75,17 +76,18 @@ void Unop::generate(IR::Expr *source, IR::Expr *target)
} // switch
Q_ASSERT(call.isValid());
- _as->generateFunctionCallImp(needsExceptionCheck, target, name, call, Assembler::PointerToValue(source));
+ _as->generateFunctionCallImp(needsExceptionCheck, target, name, call, PointerToValue(source));
}
-void Unop::generateUMinus(IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void Unop<JITAssembler>::generateUMinus(IR::Expr *source, IR::Expr *target)
{
IR::Temp *targetTemp = target->asTemp();
if (source->type == IR::SInt32Type) {
- Assembler::RegisterID tReg = Assembler::ScratchRegister;
+ typename JITAssembler::RegisterID tReg = JITAssembler::ScratchRegister;
if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister)
- tReg = (Assembler::RegisterID) targetTemp->index;
- Assembler::RegisterID sReg = _as->toInt32Register(source, tReg);
+ tReg = (typename JITAssembler::RegisterID) targetTemp->index;
+ typename JITAssembler::RegisterID sReg = _as->toInt32Register(source, tReg);
_as->move(sReg, tReg);
_as->neg32(tReg);
if (!targetTemp || targetTemp->kind != IR::Temp::PhysicalRegister)
@@ -93,26 +95,27 @@ void Unop::generateUMinus(IR::Expr *source, IR::Expr *target)
return;
}
- generateRuntimeCall(target, uMinus, Assembler::PointerToValue(source));
+ generateRuntimeCall(_as, target, uMinus, PointerToValue(source));
}
-void Unop::generateNot(IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void Unop<JITAssembler>::generateNot(IR::Expr *source, IR::Expr *target)
{
IR::Temp *targetTemp = target->asTemp();
if (source->type == IR::BoolType) {
- Assembler::RegisterID tReg = Assembler::ScratchRegister;
+ typename JITAssembler::RegisterID tReg = JITAssembler::ScratchRegister;
if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister)
- tReg = (Assembler::RegisterID) targetTemp->index;
- _as->xor32(Assembler::TrustedImm32(0x1), _as->toInt32Register(source, tReg), tReg);
+ tReg = (typename JITAssembler::RegisterID) targetTemp->index;
+ _as->xor32(TrustedImm32(0x1), _as->toInt32Register(source, tReg), tReg);
if (!targetTemp || targetTemp->kind != IR::Temp::PhysicalRegister)
_as->storeBool(tReg, target);
return;
} else if (source->type == IR::SInt32Type) {
- Assembler::RegisterID tReg = Assembler::ScratchRegister;
+ typename JITAssembler::RegisterID tReg = JITAssembler::ScratchRegister;
if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister)
- tReg = (Assembler::RegisterID) targetTemp->index;
- _as->compare32(Assembler::Equal,
- _as->toInt32Register(source, Assembler::ScratchRegister), Assembler::TrustedImm32(0),
+ tReg = (typename JITAssembler::RegisterID) targetTemp->index;
+ _as->compare32(RelationalCondition::Equal,
+ _as->toInt32Register(source, JITAssembler::ScratchRegister), TrustedImm32(0),
tReg);
if (!targetTemp || targetTemp->kind != IR::Temp::PhysicalRegister)
_as->storeBool(tReg, target);
@@ -122,22 +125,28 @@ void Unop::generateNot(IR::Expr *source, IR::Expr *target)
}
// ## generic implementation testing for int/bool
- generateRuntimeCall(target, uNot, Assembler::PointerToValue(source));
+ generateRuntimeCall(_as, target, uNot, PointerToValue(source));
}
-void Unop::generateCompl(IR::Expr *source, IR::Expr *target)
+template <typename JITAssembler>
+void Unop<JITAssembler>::generateCompl(IR::Expr *source, IR::Expr *target)
{
IR::Temp *targetTemp = target->asTemp();
if (source->type == IR::SInt32Type) {
- Assembler::RegisterID tReg = Assembler::ScratchRegister;
+ typename JITAssembler::RegisterID tReg = JITAssembler::ScratchRegister;
if (targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister)
- tReg = (Assembler::RegisterID) targetTemp->index;
- _as->xor32(Assembler::TrustedImm32(0xffffffff), _as->toInt32Register(source, tReg), tReg);
+ tReg = (typename JITAssembler::RegisterID) targetTemp->index;
+ _as->xor32(TrustedImm32(0xffffffff), _as->toInt32Register(source, tReg), tReg);
if (!targetTemp || targetTemp->kind != IR::Temp::PhysicalRegister)
_as->storeInt32(tReg, target);
return;
}
- generateRuntimeCall(target, complement, Assembler::PointerToValue(source));
+ generateRuntimeCall(_as, target, complement, PointerToValue(source));
}
+template struct QV4::JIT::Unop<QV4::JIT::Assembler<DefaultAssemblerTargetConfiguration>>;
+#if defined(V4_BOOTSTRAP) && CPU(X86_64)
+template struct QV4::JIT::Unop<QV4::JIT::Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerARMv7, NoOperatingSystemSpecialization>>>;
+#endif
+
#endif
diff --git a/src/qml/jit/qv4unop_p.h b/src/qml/jit/qv4unop_p.h
index 1141a84913..fb68f80eec 100644
--- a/src/qml/jit/qv4unop_p.h
+++ b/src/qml/jit/qv4unop_p.h
@@ -60,21 +60,25 @@ QT_BEGIN_NAMESPACE
namespace QV4 {
namespace JIT {
-class Assembler;
-
+template <typename JITAssembler>
struct Unop {
- Unop(Assembler *assembler, IR::AluOp operation)
+ Unop(JITAssembler *assembler, IR::AluOp operation)
: _as(assembler)
, op(operation)
{}
+ using RelationalCondition = typename JITAssembler::RelationalCondition;
+ using PointerToValue = typename JITAssembler::PointerToValue;
+ using RuntimeCall = typename JITAssembler::RuntimeCall;
+ using TrustedImm32 = typename JITAssembler::TrustedImm32;
+
void generate(IR::Expr *source, IR::Expr *target);
void generateUMinus(IR::Expr *source, IR::Expr *target);
void generateNot(IR::Expr *source, IR::Expr *target);
void generateCompl(IR::Expr *source, IR::Expr *target);
- Assembler *_as;
+ JITAssembler *_as;
IR::AluOp op;
};
diff --git a/src/qml/jsruntime/jsruntime.pri b/src/qml/jsruntime/jsruntime.pri
index 919524d1ed..955cf585e4 100644
--- a/src/qml/jsruntime/jsruntime.pri
+++ b/src/qml/jsruntime/jsruntime.pri
@@ -35,7 +35,6 @@ SOURCES += \
$$PWD/qv4regexp.cpp \
$$PWD/qv4serialize.cpp \
$$PWD/qv4script.cpp \
- $$PWD/qv4executableallocator.cpp \
$$PWD/qv4sequenceobject.cpp \
$$PWD/qv4include.cpp \
$$PWD/qv4qobjectwrapper.cpp \
@@ -113,7 +112,8 @@ HEADERS += \
SOURCES += \
$$PWD/qv4runtime.cpp \
$$PWD/qv4string.cpp \
- $$PWD/qv4value.cpp
+ $$PWD/qv4value.cpp \
+ $$PWD/qv4executableallocator.cpp
valgrind {
DEFINES += V4_USE_VALGRIND
diff --git a/src/qml/jsruntime/qv4engine.cpp b/src/qml/jsruntime/qv4engine.cpp
index a11f7f0875..084ddc9010 100644
--- a/src/qml/jsruntime/qv4engine.cpp
+++ b/src/qml/jsruntime/qv4engine.cpp
@@ -91,7 +91,9 @@
#if USE(PTHREADS)
# include <pthread.h>
+#if !defined(Q_OS_INTEGRITY)
# include <sys/resource.h>
+#endif
#if HAVE(PTHREAD_NP_H)
# include <pthread_np.h>
#endif
@@ -168,7 +170,7 @@ ExecutionEngine::ExecutionEngine(EvalISelFactory *factory)
if (forceMoth) {
factory = new Moth::ISelFactory;
} else {
- factory = new JIT::ISelFactory;
+ factory = new JIT::ISelFactory<>;
jitDisabled = false;
}
#else // !V4_ENABLE_JIT
@@ -1109,7 +1111,7 @@ static QVariant toVariant(QV4::ExecutionEngine *e, const QV4::Value &value, int
if (typeHint == qMetaTypeId<QJSValue>())
return QVariant::fromValue(QJSValue(e, value.asReturnedValue()));
- if (value.as<Object>()) {
+ if (value.as<QV4::Object>()) {
QV4::ScopedObject object(scope, value);
if (typeHint == QMetaType::QJsonObject
&& !value.as<ArrayObject>() && !value.as<FunctionObject>()) {
@@ -1755,7 +1757,7 @@ bool ExecutionEngine::metaTypeFromJS(const Value *value, int type, void *data)
return false;
}
-static bool convertToNativeQObject(QV4::ExecutionEngine *e, const Value &value, const QByteArray &targetType, void **result)
+static bool convertToNativeQObject(QV4::ExecutionEngine *e, const QV4::Value &value, const QByteArray &targetType, void **result)
{
if (!targetType.endsWith('*'))
return false;
@@ -1770,7 +1772,7 @@ static bool convertToNativeQObject(QV4::ExecutionEngine *e, const Value &value,
return false;
}
-static QObject *qtObjectFromJS(QV4::ExecutionEngine *engine, const Value &value)
+static QObject *qtObjectFromJS(QV4::ExecutionEngine *engine, const QV4::Value &value)
{
if (!value.isObject())
return 0;
diff --git a/src/qml/jsruntime/qv4lookup_p.h b/src/qml/jsruntime/qv4lookup_p.h
index 52f54e25f5..c5ee92fedd 100644
--- a/src/qml/jsruntime/qv4lookup_p.h
+++ b/src/qml/jsruntime/qv4lookup_p.h
@@ -54,8 +54,11 @@
#include "qv4runtime_p.h"
#include "qv4engine_p.h"
#include "qv4context_p.h"
+
+#if !defined(V4_BOOTSTRAP)
#include "qv4object_p.h"
#include "qv4internalclass_p.h"
+#endif
QT_BEGIN_NAMESPACE
diff --git a/src/qml/jsruntime/qv4object.cpp b/src/qml/jsruntime/qv4object.cpp
index eb9cb80cee..2f664c6398 100644
--- a/src/qml/jsruntime/qv4object.cpp
+++ b/src/qml/jsruntime/qv4object.cpp
@@ -1155,6 +1155,49 @@ uint Object::getLength(const Managed *m)
return v->toUInt32();
}
+// 'var' is 'V' in 15.3.5.3.
+ReturnedValue Object::instanceOf(const Object *typeObject, const Value &var)
+{
+ QV4::ExecutionEngine *engine = typeObject->internalClass()->engine;
+
+ // 15.3.5.3, Assume F is a Function object.
+ const FunctionObject *function = typeObject->as<FunctionObject>();
+ if (!function)
+ return engine->throwTypeError();
+
+ Heap::FunctionObject *f = function->d();
+ if (function->isBoundFunction())
+ f = function->cast<BoundFunction>()->target();
+
+ // 15.3.5.3, 1: HasInstance can only be used on an object
+ const Object *lhs = var.as<Object>();
+ if (!lhs)
+ return Encode(false);
+
+ // 15.3.5.3, 2
+ const Object *o = f->protoProperty();
+ if (!o) // 15.3.5.3, 3
+ return engine->throwTypeError();
+
+ Heap::Object *v = lhs->d();
+
+ // 15.3.5.3, 4
+ while (v) {
+ // 15.3.5.3, 4, a
+ v = v->prototype;
+
+ // 15.3.5.3, 4, b
+ if (!v)
+ break; // will return false
+
+ // 15.3.5.3, 4, c
+ else if (o->d() == v)
+ return Encode(true);
+ }
+
+ return Encode(false);
+}
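
The spec steps above preserve ordinary ECMAScript instanceof semantics; only the dispatch point moves into the ObjectVTable. As an illustrative sketch (not part of the patch), the observable behaviour through the public QJSEngine API:

    #include <QJSEngine>

    QJSEngine js;
    QJSValue r = js.evaluate(QStringLiteral(
        "function F() {}"
        "var x = new F();"
        "[x instanceof F, ({}) instanceof F, 1 instanceof F].toString()"));
    // r.toString() yields "true,false,false": only objects whose prototype chain
    // reaches F.prototype satisfy the check; a non-object left-hand side simply yields false.
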
+
bool Object::setArrayLength(uint newLen)
{
Q_ASSERT(isArrayObject());
diff --git a/src/qml/jsruntime/qv4object_p.h b/src/qml/jsruntime/qv4object_p.h
index 4a78690f47..6a543ae1a8 100644
--- a/src/qml/jsruntime/qv4object_p.h
+++ b/src/qml/jsruntime/qv4object_p.h
@@ -140,6 +140,7 @@ struct ObjectVTable
void (*setLookup)(Managed *m, Lookup *l, const Value &v);
uint (*getLength)(const Managed *m);
void (*advanceIterator)(Managed *m, ObjectIterator *it, Value *name, uint *index, Property *p, PropertyAttributes *attributes);
+ ReturnedValue (*instanceOf)(const Object *typeObject, const Value &var);
};
#define DEFINE_OBJECT_VTABLE_BASE(classname) \
@@ -159,7 +160,8 @@ const QV4::ObjectVTable classname::static_vtbl = \
getLookup, \
setLookup, \
getLength, \
- advanceIterator \
+ advanceIterator, \
+ instanceOf \
}
#define DEFINE_OBJECT_VTABLE(classname) \
@@ -351,6 +353,8 @@ public:
void advanceIterator(ObjectIterator *it, Value *name, uint *index, Property *p, PropertyAttributes *attributes)
{ vtable()->advanceIterator(this, it, name, index, p, attributes); }
uint getLength() const { return vtable()->getLength(this); }
+ ReturnedValue instanceOf(const Value &var) const
+ { return vtable()->instanceOf(this, var); }
inline void construct(Scope &scope, CallData *d) const
{ return vtable()->construct(this, scope, d); }
@@ -372,6 +376,7 @@ protected:
static void setLookup(Managed *m, Lookup *l, const Value &v);
static void advanceIterator(Managed *m, ObjectIterator *it, Value *name, uint *index, Property *p, PropertyAttributes *attributes);
static uint getLength(const Managed *m);
+ static ReturnedValue instanceOf(const Object *typeObject, const Value &var);
private:
ReturnedValue internalGet(String *name, bool *hasProperty) const;
diff --git a/src/qml/jsruntime/qv4runtime.cpp b/src/qml/jsruntime/qv4runtime.cpp
index 023a739e33..7f184f8221 100644
--- a/src/qml/jsruntime/qv4runtime.cpp
+++ b/src/qml/jsruntime/qv4runtime.cpp
@@ -343,35 +343,15 @@ ReturnedValue Runtime::method_deleteName(ExecutionEngine *engine, int nameIndex)
return Encode(engine->currentContext->deleteProperty(name));
}
-QV4::ReturnedValue Runtime::method_instanceof(ExecutionEngine *engine, const Value &left, const Value &right)
+QV4::ReturnedValue Runtime::method_instanceof(ExecutionEngine *engine, const Value &lval, const Value &rval)
{
- const FunctionObject *function = right.as<FunctionObject>();
- if (!function)
- return engine->throwTypeError();
-
- Heap::FunctionObject *f = function->d();
- if (function->isBoundFunction())
- f = function->cast<BoundFunction>()->target();
-
- const Object *o = left.as<Object>();
- if (!o)
- return Encode(false);
- Heap::Object *v = o->d();
-
- o = f->protoProperty();
- if (!o)
- return engine->throwTypeError();
-
- while (v) {
- v = v->prototype;
-
- if (!v)
- break;
- else if (o->d() == v)
- return Encode(true);
- }
+ // 11.8.6, 5: rval must be an Object
+ const Object *rhs = rval.as<Object>();
+ if (!rhs)
+ return engine->throwTypeError();
- return Encode(false);
+ // 11.8.6, 7: call "HasInstance", which we term instanceOf, and return the result.
+ return rhs->instanceOf(lval);
}
QV4::ReturnedValue Runtime::method_in(ExecutionEngine *engine, const Value &left, const Value &right)
diff --git a/src/qml/jsruntime/qv4value_p.h b/src/qml/jsruntime/qv4value_p.h
index 64f0f3a86f..4ff0565f9b 100644
--- a/src/qml/jsruntime/qv4value_p.h
+++ b/src/qml/jsruntime/qv4value_p.h
@@ -254,34 +254,47 @@ public:
Q_ASSERT(isDouble()); return Double_Type;
}
-#ifndef QV4_USE_64_BIT_VALUE_ENCODING
+ // Shared between 32-bit and 64-bit encoding
+ enum {
+ Tag_Shift = 32
+ };
+
+ // Used only by 64-bit encoding
+ static const quint64 NaNEncodeMask = 0xfffc000000000000ll;
+ enum {
+ IsDouble_Shift = 64-14,
+ IsManagedOrUndefined_Shift = 64-15,
+ IsIntegerConvertible_Shift = 64-16,
+ IsDoubleTag_Shift = IsDouble_Shift - Tag_Shift,
+ Managed_Type_Internal_64 = 0
+ };
+ static const quint64 Immediate_Mask_64 = 0x00020000u; // bit 49
+
+ // Used only by 32-bit encoding
enum Masks {
SilentNaNBit = 0x00040000,
- NaN_Mask = 0x7ff80000,
NotDouble_Mask = 0x7ffa0000,
- Immediate_Mask = NotDouble_Mask | 0x00020000u | SilentNaNBit,
- Tag_Shift = 32
};
+ static const quint64 Immediate_Mask_32 = NotDouble_Mask | 0x00020000u | SilentNaNBit;
enum {
- Managed_Type_Internal = NotDouble_Mask
+ Managed_Type_Internal_32 = NotDouble_Mask
};
-#else
- static const quint64 NaNEncodeMask = 0xfffc000000000000ll;
- static const quint64 Immediate_Mask = 0x00020000u; // bit 49
- enum Masks {
- NaN_Mask = 0x7ff80000,
+#ifdef QV4_USE_64_BIT_VALUE_ENCODING
+ enum {
+ Managed_Type_Internal = Managed_Type_Internal_64
};
+ static const quint64 Immediate_Mask = Immediate_Mask_64;
+#else
enum {
- IsDouble_Shift = 64-14,
- IsManagedOrUndefined_Shift = 64-15,
- IsIntegerConvertible_Shift = 64-16,
- Tag_Shift = 32,
- IsDoubleTag_Shift = IsDouble_Shift - Tag_Shift,
- Managed_Type_Internal = 0
+ Managed_Type_Internal = Managed_Type_Internal_32
};
+ static const quint64 Immediate_Mask = Immediate_Mask_32;
#endif
+ enum {
+ NaN_Mask = 0x7ff80000,
+ };
enum ValueTypeInternal {
Empty_Type_Internal = Immediate_Mask | 0,
ConvertibleToInt = Immediate_Mask | 0x10000u, // bit 48
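
As a quick arithmetic check of the "bit 48"/"bit 49" comments (illustrative, standalone; not part of the patch): a bit set in the 32-bit tag word corresponds to physical bit (n + Tag_Shift) of the 64-bit encoded value.

    #include <cstdint>

    constexpr std::uint64_t tagBitInValue(std::uint32_t tagMask)
    {
        return std::uint64_t(tagMask) << 32;   // Tag_Shift == 32
    }
    static_assert(tagBitInValue(0x00020000u) == (std::uint64_t(1) << 49),
                  "Immediate_Mask_64 marks bit 49");
    static_assert(tagBitInValue(0x00010000u) == (std::uint64_t(1) << 48),
                  "ConvertibleToInt marks bit 48");
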
diff --git a/src/qml/parser/qqmljskeywords_p.h b/src/qml/parser/qqmljskeywords_p.h
index 84ebe5f210..a31c188efc 100644
--- a/src/qml/parser/qqmljskeywords_p.h
+++ b/src/qml/parser/qqmljskeywords_p.h
@@ -278,7 +278,7 @@ static inline int classify5(const QChar *s, bool qmlMode) {
if (s[2].unicode() == 'n') {
if (s[3].unicode() == 's') {
if (s[4].unicode() == 't') {
- return qmlMode ? int(Lexer::T_CONST) : int(Lexer::T_RESERVED_WORD);
+ return int(Lexer::T_CONST);
}
}
}
diff --git a/src/qml/qml/qqmlimport.cpp b/src/qml/qml/qqmlimport.cpp
index bd41659f27..c07d5c740a 100644
--- a/src/qml/qml/qqmlimport.cpp
+++ b/src/qml/qml/qqmlimport.cpp
@@ -1,5 +1,6 @@
/****************************************************************************
**
+** Copyright (C) 2017 Crimson AS <info@crimson.no>
** Copyright (C) 2016 The Qt Company Ltd.
** Contact: https://www.qt.io/licensing/
**
@@ -238,6 +239,21 @@ typedef QPair<QStaticPlugin, QJsonArray> StaticPluginPair;
/*!
\internal
+ \class QQmlImportInstance
+
+  A QQmlImportInstance represents a single import of a document, held within a
+ namespace.
+
+ \note The uri here may not necessarily be unique (e.g. for file imports).
+
+ \note Version numbers may be -1 for file imports: this means that no
+ version was specified as part of the import. Type resolution will be
+ responsible for attempting to find the "best" possible version.
+*/
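
As an illustration only (values invented, not part of the patch), the fields end up roughly as follows for two typical imports:

    QQmlImportInstance library;
    library.uri = QStringLiteral("QtQuick");     // import QtQuick 2.6
    library.majversion = 2;
    library.minversion = 6;
    library.isLibrary = true;

    QQmlImportInstance fileImport;
    fileImport.uri = QStringLiteral("widgets");  // import "widgets" (file import)
    fileImport.majversion = -1;                  // no version given; type resolution picks the best match
    fileImport.minversion = -1;
    fileImport.isLibrary = false;
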
+
+/*!
+ \internal
+ \class QQmlImportNamespace
A QQmlImportNamespace is a way of separating imports into a local namespace.
@@ -253,56 +269,6 @@ typedef QPair<QStaticPlugin, QJsonArray> StaticPluginPair;
import MyFoo 1.0 as Foo
*/
-class QQmlImportNamespace
-{
-public:
- QQmlImportNamespace() : nextNamespace(0) {}
- ~QQmlImportNamespace() { qDeleteAll(imports); }
-
- /*!
- \internal
-
- A QQmlImportNamespace::Import represents an actual instance of an import
- within a namespace.
-
- \note The uri here may not necessarily be unique (e.g. for file imports).
-
- \note Version numbers may be -1 for file imports: this means that no
- version was specified as part of the import. Type resolution will be
- responsible for attempting to find the "best" possible version.
- */
- struct Import {
- QString uri; // e.g. QtQuick
- QString url; // the base path of the import
- int majversion; // the major version imported
- int minversion; // the minor version imported
- bool isLibrary; // true means that this is not a file import
- QQmlDirComponents qmlDirComponents; // a copy of the components listed in the qmldir
- QQmlDirScripts qmlDirScripts; // a copy of the scripts in the qmldir
-
- bool setQmldirContent(const QString &resolvedUrl, const QQmlTypeLoader::QmldirContent *qmldir,
- QQmlImportNamespace *nameSpace, QList<QQmlError> *errors);
-
- static QQmlDirScripts getVersionedScripts(const QQmlDirScripts &qmldirscripts, int vmaj, int vmin);
-
- bool resolveType(QQmlTypeLoader *typeLoader, const QHashedStringRef &type,
- int *vmajor, int *vminor, QQmlType** type_return,
- QString *base = 0, bool *typeRecursionDetected = 0) const;
- };
- QList<Import *> imports;
-
- Import *findImport(const QString &uri) const;
-
- bool resolveType(QQmlTypeLoader *typeLoader, const QHashedStringRef& type,
- int *vmajor, int *vminor, QQmlType** type_return,
- QString *base = 0, QList<QQmlError> *errors = 0);
-
- // Prefix when used as a qualified import. Otherwise empty.
- QHashedString prefix;
-
- // Used by QQmlImportsPrivate::qualifiedSets
- QQmlImportNamespace *nextNamespace;
-};
class QQmlImportsPrivate
{
@@ -348,21 +314,21 @@ public:
QQmlImportDatabase *database,
QString *outQmldirFilePath, QString *outUrl);
- static bool validateQmldirVersion(const QQmlTypeLoader::QmldirContent *qmldir, const QString &uri, int vmaj, int vmin,
+ static bool validateQmldirVersion(const QQmlTypeLoaderQmldirContent *qmldir, const QString &uri, int vmaj, int vmin,
QList<QQmlError> *errors);
bool importExtension(const QString &absoluteFilePath, const QString &uri,
int vmaj, int vmin,
QQmlImportDatabase *database,
- const QQmlTypeLoader::QmldirContent *qmldir,
+ const QQmlTypeLoaderQmldirContent *qmldir,
QList<QQmlError> *errors);
bool getQmldirContent(const QString &qmldirIdentifier, const QString &uri,
- const QQmlTypeLoader::QmldirContent **qmldir, QList<QQmlError> *errors);
+ const QQmlTypeLoaderQmldirContent **qmldir, QList<QQmlError> *errors);
QString resolvedUri(const QString &dir_arg, QQmlImportDatabase *database);
- QQmlImportNamespace::Import *addImportToNamespace(QQmlImportNamespace *nameSpace,
+ QQmlImportInstance *addImportToNamespace(QQmlImportNamespace *nameSpace,
const QString &uri, const QString &url,
int vmaj, int vmin, QV4::CompiledData::Import::ImportType type,
QList<QQmlError> *errors, bool lowPrecedence = false);
@@ -427,12 +393,22 @@ QUrl QQmlImports::baseUrl() const
return d->baseUrl;
}
+/*
+ \internal
+
+  This method populates the \a cache with data for all types visible in this
+  document's imports, so that they can be resolved elsewhere (e.g. in JS, or
+  when loading additional types).
+
+ \note This is for C++ types only. Composite types are handled separately,
+ as they do not have a QQmlTypeModule.
+*/
void QQmlImports::populateCache(QQmlTypeNameCache *cache) const
{
const QQmlImportNamespace &set = d->unqualifiedset;
for (int ii = set.imports.count() - 1; ii >= 0; --ii) {
- const QQmlImportNamespace::Import *import = set.imports.at(ii);
+ const QQmlImportInstance *import = set.imports.at(ii);
QQmlTypeModule *module = QQmlMetaType::typeModule(import->uri, import->majversion);
if (module) {
cache->m_anonymousImports.append(QQmlTypeModuleVersion(module, import->minversion));
@@ -443,11 +419,14 @@ void QQmlImports::populateCache(QQmlTypeNameCache *cache) const
const QQmlImportNamespace &set = *ns;
+ // positioning is important; we must create the namespace even if there is no module.
+ QQmlTypeNameCache::Import &typeimport = cache->m_namedImports[set.prefix];
+ typeimport.m_qualifier = set.prefix;
+
for (int ii = set.imports.count() - 1; ii >= 0; --ii) {
- const QQmlImportNamespace::Import *import = set.imports.at(ii);
+ const QQmlImportInstance *import = set.imports.at(ii);
QQmlTypeModule *module = QQmlMetaType::typeModule(import->uri, import->majversion);
if (module) {
- QQmlTypeNameCache::Import &typeimport = cache->m_namedImports[set.prefix];
typeimport.modules.append(QQmlTypeModuleVersion(module, import->minversion));
}
}
@@ -476,7 +455,7 @@ void findCompositeSingletons(const QQmlImportNamespace &set, QList<QQmlImports::
typedef QQmlDirComponents::const_iterator ConstIterator;
for (int ii = set.imports.count() - 1; ii >= 0; --ii) {
- const QQmlImportNamespace::Import *import = set.imports.at(ii);
+ const QQmlImportInstance *import = set.imports.at(ii);
const QQmlDirComponents &components = import->qmlDirComponents;
@@ -494,6 +473,15 @@ void findCompositeSingletons(const QQmlImportNamespace &set, QList<QQmlImports::
}
}
+/*
+ \internal
+
+ Returns a list of all composite singletons present in this document's
+ imports.
+
+ This information is used by QQmlTypeLoader to ensure that composite singletons
+ are marked as dependencies during type loading.
+*/
QList<QQmlImports::CompositeSingletonReference> QQmlImports::resolvedCompositeSingletons() const
{
QList<QQmlImports::CompositeSingletonReference> compositeSingletons;
@@ -509,6 +497,12 @@ QList<QQmlImports::CompositeSingletonReference> QQmlImports::resolvedCompositeSi
return compositeSingletons;
}
+/*
+ \internal
+
+ Returns a list of scripts imported by this document. This is used by
+ QQmlTypeLoader to properly handle dependencies on imported scripts.
+*/
QList<QQmlImports::ScriptReference> QQmlImports::resolvedScripts() const
{
QList<QQmlImports::ScriptReference> scripts;
@@ -516,7 +510,7 @@ QList<QQmlImports::ScriptReference> QQmlImports::resolvedScripts() const
const QQmlImportNamespace &set = d->unqualifiedset;
for (int ii = set.imports.count() - 1; ii >= 0; --ii) {
- const QQmlImportNamespace::Import *import = set.imports.at(ii);
+ const QQmlImportInstance *import = set.imports.at(ii);
for (const QQmlDirParser::Script &script : import->qmlDirScripts) {
ScriptReference ref;
@@ -530,7 +524,7 @@ QList<QQmlImports::ScriptReference> QQmlImports::resolvedScripts() const
const QQmlImportNamespace &set = *ns;
for (int ii = set.imports.count() - 1; ii >= 0; --ii) {
- const QQmlImportNamespace::Import *import = set.imports.at(ii);
+ const QQmlImportInstance *import = set.imports.at(ii);
for (const QQmlDirParser::Script &script : import->qmlDirScripts) {
ScriptReference ref;
@@ -654,7 +648,7 @@ bool QQmlImports::resolveType(const QHashedStringRef &type,
return false;
}
-bool QQmlImportNamespace::Import::setQmldirContent(const QString &resolvedUrl, const QQmlTypeLoader::QmldirContent *qmldir, QQmlImportNamespace *nameSpace, QList<QQmlError> *errors)
+bool QQmlImportInstance::setQmldirContent(const QString &resolvedUrl, const QQmlTypeLoaderQmldirContent *qmldir, QQmlImportNamespace *nameSpace, QList<QQmlError> *errors)
{
Q_ASSERT(resolvedUrl.endsWith(Slash));
url = resolvedUrl;
@@ -664,7 +658,7 @@ bool QQmlImportNamespace::Import::setQmldirContent(const QString &resolvedUrl, c
const QQmlDirScripts &scripts = qmldir->scripts();
if (!scripts.isEmpty()) {
// Verify that we haven't imported these scripts already
- for (QList<QQmlImportNamespace::Import *>::const_iterator it = nameSpace->imports.constBegin();
+ for (QList<QQmlImportInstance *>::const_iterator it = nameSpace->imports.constBegin();
it != nameSpace->imports.constEnd(); ++it) {
if ((*it != this) && ((*it)->uri == uri)) {
QQmlError error;
@@ -680,7 +674,7 @@ bool QQmlImportNamespace::Import::setQmldirContent(const QString &resolvedUrl, c
return true;
}
-QQmlDirScripts QQmlImportNamespace::Import::getVersionedScripts(const QQmlDirScripts &qmldirscripts, int vmaj, int vmin)
+QQmlDirScripts QQmlImportInstance::getVersionedScripts(const QQmlDirScripts &qmldirscripts, int vmaj, int vmin)
{
QMap<QString, QQmlDirParser::Script> versioned;
@@ -716,7 +710,7 @@ bool QQmlImports::resolveType(QQmlImportNamespace* ns, const QHashedStringRef &t
return ns->resolveType(d->typeLoader,type,vmaj,vmin,type_return);
}
-bool QQmlImportNamespace::Import::resolveType(QQmlTypeLoader *typeLoader,
+bool QQmlImportInstance::resolveType(QQmlTypeLoader *typeLoader,
const QHashedStringRef& type, int *vmajor, int *vminor,
QQmlType** type_return, QString *base, bool *typeRecursionDetected) const
{
@@ -853,9 +847,9 @@ bool QQmlImportsPrivate::resolveType(const QHashedStringRef& type, int *vmajor,
return false;
}
-QQmlImportNamespace::Import *QQmlImportNamespace::findImport(const QString &uri) const
+QQmlImportInstance *QQmlImportNamespace::findImport(const QString &uri) const
{
- for (Import *import : imports) {
+ for (QQmlImportInstance *import : imports) {
if (import->uri == uri)
return import;
}
@@ -868,13 +862,13 @@ bool QQmlImportNamespace::resolveType(QQmlTypeLoader *typeLoader, const QHashedS
{
bool typeRecursionDetected = false;
for (int i=0; i<imports.count(); ++i) {
- const Import *import = imports.at(i);
+ const QQmlImportInstance *import = imports.at(i);
if (import->resolveType(typeLoader, type, vmajor, vminor, type_return,
base, &typeRecursionDetected)) {
if (qmlCheckTypes()) {
// check for type clashes
for (int j = i+1; j<imports.count(); ++j) {
- const Import *import2 = imports.at(j);
+ const QQmlImportInstance *import2 = imports.at(j);
if (import2->resolveType(typeLoader, type, vmajor, vminor, 0, base)) {
if (errors) {
QString u1 = import->url;
@@ -1017,10 +1011,12 @@ bool QQmlImportsPrivate::populatePluginPairVector(QVector<StaticPluginPair> &res
}
#endif
+#if defined(QT_SHARED) || !QT_CONFIG(library)
static inline QString msgCannotLoadPlugin(const QString &uri, const QString &why)
{
return QQmlImportDatabase::tr("plugin cannot be loaded for module \"%1\": %2").arg(uri, why);
}
+#endif
/*!
Import an extension defined by a qmldir file.
@@ -1031,7 +1027,7 @@ bool QQmlImportsPrivate::importExtension(const QString &qmldirFilePath,
const QString &uri,
int vmaj, int vmin,
QQmlImportDatabase *database,
- const QQmlTypeLoader::QmldirContent *qmldir,
+ const QQmlTypeLoaderQmldirContent *qmldir,
QList<QQmlError> *errors)
{
#if QT_CONFIG(library)
@@ -1167,7 +1163,7 @@ bool QQmlImportsPrivate::importExtension(const QString &qmldirFilePath,
}
bool QQmlImportsPrivate::getQmldirContent(const QString &qmldirIdentifier, const QString &uri,
- const QQmlTypeLoader::QmldirContent **qmldir, QList<QQmlError> *errors)
+ const QQmlTypeLoaderQmldirContent **qmldir, QList<QQmlError> *errors)
{
Q_ASSERT(errors);
Q_ASSERT(qmldir);
@@ -1292,7 +1288,7 @@ bool QQmlImportsPrivate::locateQmldir(const QString &uri, int vmaj, int vmin, QQ
return false;
}
-bool QQmlImportsPrivate::validateQmldirVersion(const QQmlTypeLoader::QmldirContent *qmldir, const QString &uri, int vmaj, int vmin,
+bool QQmlImportsPrivate::validateQmldirVersion(const QQmlTypeLoaderQmldirContent *qmldir, const QString &uri, int vmaj, int vmin,
QList<QQmlError> *errors)
{
int lowest_min = INT_MAX;
@@ -1375,7 +1371,7 @@ QQmlImportNamespace *QQmlImportsPrivate::importNamespace(const QString &prefix)
return nameSpace;
}
-QQmlImportNamespace::Import *QQmlImportsPrivate::addImportToNamespace(QQmlImportNamespace *nameSpace,
+QQmlImportInstance *QQmlImportsPrivate::addImportToNamespace(QQmlImportNamespace *nameSpace,
const QString &uri, const QString &url, int vmaj, int vmin,
QV4::CompiledData::Import::ImportType type,
QList<QQmlError> *errors, bool lowPrecedence)
@@ -1385,7 +1381,7 @@ QQmlImportNamespace::Import *QQmlImportsPrivate::addImportToNamespace(QQmlImport
Q_UNUSED(errors);
Q_ASSERT(url.isEmpty() || url.endsWith(Slash));
- QQmlImportNamespace::Import *import = new QQmlImportNamespace::Import;
+ QQmlImportInstance *import = new QQmlImportInstance;
import->uri = uri;
import->url = url;
import->majversion = vmaj;
@@ -1411,11 +1407,11 @@ bool QQmlImportsPrivate::addLibraryImport(const QString& uri, const QString &pre
QQmlImportNamespace *nameSpace = importNamespace(prefix);
Q_ASSERT(nameSpace);
- QQmlImportNamespace::Import *inserted = addImportToNamespace(nameSpace, uri, qmldirUrl, vmaj, vmin, QV4::CompiledData::Import::ImportLibrary, errors);
+ QQmlImportInstance *inserted = addImportToNamespace(nameSpace, uri, qmldirUrl, vmaj, vmin, QV4::CompiledData::Import::ImportLibrary, errors);
Q_ASSERT(inserted);
if (!incomplete) {
- const QQmlTypeLoader::QmldirContent *qmldir = 0;
+ const QQmlTypeLoaderQmldirContent *qmldir = 0;
if (!qmldirIdentifier.isEmpty()) {
if (!getQmldirContent(qmldirIdentifier, uri, &qmldir, errors))
@@ -1512,11 +1508,11 @@ bool QQmlImportsPrivate::addFileImport(const QString& uri, const QString &prefix
if (!url.endsWith(Slash) && !url.endsWith(Backslash))
url += Slash;
- QQmlImportNamespace::Import *inserted = addImportToNamespace(nameSpace, importUri, url, vmaj, vmin, QV4::CompiledData::Import::ImportFile, errors, isImplicitImport);
+ QQmlImportInstance *inserted = addImportToNamespace(nameSpace, importUri, url, vmaj, vmin, QV4::CompiledData::Import::ImportFile, errors, isImplicitImport);
Q_ASSERT(inserted);
if (!incomplete && !qmldirIdentifier.isEmpty()) {
- const QQmlTypeLoader::QmldirContent *qmldir = 0;
+ const QQmlTypeLoaderQmldirContent *qmldir = 0;
if (!getQmldirContent(qmldirIdentifier, importUri, &qmldir, errors))
return false;
@@ -1539,8 +1535,8 @@ bool QQmlImportsPrivate::updateQmldirContent(const QString &uri, const QString &
QQmlImportNamespace *nameSpace = importNamespace(prefix);
Q_ASSERT(nameSpace);
- if (QQmlImportNamespace::Import *import = nameSpace->findImport(uri)) {
- const QQmlTypeLoader::QmldirContent *qmldir = 0;
+ if (QQmlImportInstance *import = nameSpace->findImport(uri)) {
+ const QQmlTypeLoaderQmldirContent *qmldir = 0;
if (!getQmldirContent(qmldirIdentifier, uri, &qmldir, errors))
return false;
diff --git a/src/qml/qml/qqmlimport_p.h b/src/qml/qml/qqmlimport_p.h
index 0e7848730f..7c691a468c 100644
--- a/src/qml/qml/qqmlimport_p.h
+++ b/src/qml/qml/qqmlimport_p.h
@@ -68,6 +68,48 @@ class QQmlImportNamespace;
class QQmlImportsPrivate;
class QQmlImportDatabase;
class QQmlTypeLoader;
+class QQmlTypeLoaderQmldirContent;
+
+struct QQmlImportInstance
+{
+ QString uri; // e.g. QtQuick
+ QString url; // the base path of the import
+ int majversion; // the major version imported
+ int minversion; // the minor version imported
+ bool isLibrary; // true means that this is not a file import
+ QQmlDirComponents qmlDirComponents; // a copy of the components listed in the qmldir
+ QQmlDirScripts qmlDirScripts; // a copy of the scripts in the qmldir
+
+ bool setQmldirContent(const QString &resolvedUrl, const QQmlTypeLoaderQmldirContent *qmldir,
+ QQmlImportNamespace *nameSpace, QList<QQmlError> *errors);
+
+ static QQmlDirScripts getVersionedScripts(const QQmlDirScripts &qmldirscripts, int vmaj, int vmin);
+
+ bool resolveType(QQmlTypeLoader *typeLoader, const QHashedStringRef &type,
+ int *vmajor, int *vminor, QQmlType** type_return,
+ QString *base = 0, bool *typeRecursionDetected = 0) const;
+};
+
+class QQmlImportNamespace
+{
+public:
+ QQmlImportNamespace() : nextNamespace(0) {}
+ ~QQmlImportNamespace() { qDeleteAll(imports); }
+
+ QList<QQmlImportInstance *> imports;
+
+ QQmlImportInstance *findImport(const QString &uri) const;
+
+ bool resolveType(QQmlTypeLoader *typeLoader, const QHashedStringRef& type,
+ int *vmajor, int *vminor, QQmlType** type_return,
+ QString *base = 0, QList<QQmlError> *errors = 0);
+
+ // Prefix when used as a qualified import. Otherwise empty.
+ QHashedString prefix;
+
+ // Used by QQmlImportsPrivate::qualifiedSets
+ QQmlImportNamespace *nextNamespace;
+};
class Q_QML_PRIVATE_EXPORT QQmlImports
{
diff --git a/src/qml/qml/qqmlpropertycache.cpp b/src/qml/qml/qqmlpropertycache.cpp
index 562aa1c88a..88ce2fa1b9 100644
--- a/src/qml/qml/qqmlpropertycache.cpp
+++ b/src/qml/qml/qqmlpropertycache.cpp
@@ -76,7 +76,8 @@ public:
int argumentsValid:1;
QList<QByteArray> *names;
- int arguments[0];
+
+ int arguments[1];
};
// Flags that do *NOT* depend on the property's QMetaProperty::userType() and thus are quick
@@ -919,7 +920,7 @@ static int EnumType(const QMetaObject *metaobj, const QByteArray &str, int type)
QQmlPropertyCacheMethodArguments *QQmlPropertyCache::createArgumentsObject(int argc, const QList<QByteArray> &names)
{
typedef QQmlPropertyCacheMethodArguments A;
- A *args = static_cast<A *>(malloc(sizeof(A) + (argc + 1) * sizeof(int)));
+ A *args = static_cast<A *>(malloc(sizeof(A) + (argc) * sizeof(int)));
args->arguments[0] = argc;
args->argumentsValid = false;
args->signalParameterStringForJS = 0;
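
The allocation still provides argc + 1 int slots: arguments[0] holds the count and argc type ids follow, but one of those slots is now part of sizeof(A) itself because the member is declared as arguments[1]. A minimal sketch of the same struct-hack layout with a hypothetical struct:

    #include <cstdlib>

    struct S { /* other members */ int slots[1]; };   // slots[0] is inside sizeof(S)

    S *makeArgs(int count)
    {
        // count entries follow the count itself: count + 1 slots in total,
        // one of which sizeof(S) already accounts for.
        S *s = static_cast<S *>(std::malloc(sizeof(S) + count * sizeof(int)));
        s->slots[0] = count;
        return s;
    }
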
diff --git a/src/qml/qml/qqmltypeloader.cpp b/src/qml/qml/qqmltypeloader.cpp
index 13ad02f7cb..f4f04e12c0 100644
--- a/src/qml/qml/qqmltypeloader.cpp
+++ b/src/qml/qml/qqmltypeloader.cpp
@@ -1362,7 +1362,7 @@ bool QQmlTypeLoader::Blob::updateQmldir(QQmlQmldirData *data, const QV4::Compile
if (!importQualifier.isEmpty()) {
// Does this library contain any qualified scripts?
QUrl libraryUrl(qmldirUrl);
- const QmldirContent *qmldir = typeLoader()->qmldirContent(qmldirIdentifier);
+ const QQmlTypeLoaderQmldirContent *qmldir = typeLoader()->qmldirContent(qmldirIdentifier);
const auto qmldirScripts = qmldir->scripts();
for (const QQmlDirParser::Script &script : qmldirScripts) {
QUrl scriptUrl = libraryUrl.resolved(QUrl(script.fileName));
@@ -1410,7 +1410,7 @@ bool QQmlTypeLoader::Blob::addImport(const QV4::CompiledData::Import *import, QL
if (!importQualifier.isEmpty()) {
// Does this library contain any qualified scripts?
QUrl libraryUrl(qmldirUrl);
- const QmldirContent *qmldir = typeLoader()->qmldirContent(qmldirFilePath);
+ const QQmlTypeLoaderQmldirContent *qmldir = typeLoader()->qmldirContent(qmldirFilePath);
const auto qmldirScripts = qmldir->scripts();
for (const QQmlDirParser::Script &script : qmldirScripts) {
QUrl scriptUrl = libraryUrl.resolved(QUrl(script.fileName));
@@ -1539,57 +1539,57 @@ bool QQmlTypeLoader::Blob::qmldirDataAvailable(QQmlQmldirData *data, QList<QQmlE
}
-QQmlTypeLoader::QmldirContent::QmldirContent()
+QQmlTypeLoaderQmldirContent::QQmlTypeLoaderQmldirContent()
{
}
-bool QQmlTypeLoader::QmldirContent::hasError() const
+bool QQmlTypeLoaderQmldirContent::hasError() const
{
return m_parser.hasError();
}
-QList<QQmlError> QQmlTypeLoader::QmldirContent::errors(const QString &uri) const
+QList<QQmlError> QQmlTypeLoaderQmldirContent::errors(const QString &uri) const
{
return m_parser.errors(uri);
}
-QString QQmlTypeLoader::QmldirContent::typeNamespace() const
+QString QQmlTypeLoaderQmldirContent::typeNamespace() const
{
return m_parser.typeNamespace();
}
-void QQmlTypeLoader::QmldirContent::setContent(const QString &location, const QString &content)
+void QQmlTypeLoaderQmldirContent::setContent(const QString &location, const QString &content)
{
m_location = location;
m_parser.parse(content);
}
-void QQmlTypeLoader::QmldirContent::setError(const QQmlError &error)
+void QQmlTypeLoaderQmldirContent::setError(const QQmlError &error)
{
m_parser.setError(error);
}
-QQmlDirComponents QQmlTypeLoader::QmldirContent::components() const
+QQmlDirComponents QQmlTypeLoaderQmldirContent::components() const
{
return m_parser.components();
}
-QQmlDirScripts QQmlTypeLoader::QmldirContent::scripts() const
+QQmlDirScripts QQmlTypeLoaderQmldirContent::scripts() const
{
return m_parser.scripts();
}
-QQmlDirPlugins QQmlTypeLoader::QmldirContent::plugins() const
+QQmlDirPlugins QQmlTypeLoaderQmldirContent::plugins() const
{
return m_parser.plugins();
}
-QString QQmlTypeLoader::QmldirContent::pluginLocation() const
+QString QQmlTypeLoaderQmldirContent::pluginLocation() const
{
return m_location;
}
-bool QQmlTypeLoader::QmldirContent::designerSupported() const
+bool QQmlTypeLoaderQmldirContent::designerSupported() const
{
return m_parser.designerSupported();
}
@@ -1861,13 +1861,13 @@ bool QQmlTypeLoader::directoryExists(const QString &path)
/*!
-Return a QmldirContent for absoluteFilePath. The QmldirContent may be cached.
+Return a QQmlTypeLoaderQmldirContent for absoluteFilePath. The QQmlTypeLoaderQmldirContent may be cached.
\a filePath is a local file path.
It can also be a remote path for a remote directory import, but it will have been cached by now in this case.
*/
-const QQmlTypeLoader::QmldirContent *QQmlTypeLoader::qmldirContent(const QString &filePathIn)
+const QQmlTypeLoaderQmldirContent *QQmlTypeLoader::qmldirContent(const QString &filePathIn)
{
QUrl url(filePathIn); //May already contain http scheme
if (url.scheme() == QLatin1String("http") || url.scheme() == QLatin1String("https"))
@@ -1883,10 +1883,10 @@ const QQmlTypeLoader::QmldirContent *QQmlTypeLoader::qmldirContent(const QString
else
filePath = url.path();
- QmldirContent *qmldir;
- QmldirContent **val = m_importQmlDirCache.value(filePath);
+ QQmlTypeLoaderQmldirContent *qmldir;
+ QQmlTypeLoaderQmldirContent **val = m_importQmlDirCache.value(filePath);
if (!val) {
- qmldir = new QmldirContent;
+ qmldir = new QQmlTypeLoaderQmldirContent;
#define ERROR(description) { QQmlError e; e.setDescription(description); qmldir->setError(e); }
#define NOT_READABLE_ERROR QString(QLatin1String("module \"$$URI$$\" definition \"%1\" not readable"))
@@ -1916,12 +1916,12 @@ const QQmlTypeLoader::QmldirContent *QQmlTypeLoader::qmldirContent(const QString
void QQmlTypeLoader::setQmldirContent(const QString &url, const QString &content)
{
- QmldirContent *qmldir;
- QmldirContent **val = m_importQmlDirCache.value(url);
+ QQmlTypeLoaderQmldirContent *qmldir;
+ QQmlTypeLoaderQmldirContent **val = m_importQmlDirCache.value(url);
if (val) {
qmldir = *val;
} else {
- qmldir = new QmldirContent;
+ qmldir = new QQmlTypeLoaderQmldirContent;
m_importQmlDirCache.insert(url, qmldir);
}
@@ -2075,6 +2075,11 @@ bool QQmlTypeData::tryLoadFromDiskCache()
}
}
+ if (unit->data->flags & QV4::CompiledData::Unit::PendingTypeCompilation) {
+ restoreIR(unit);
+ return true;
+ }
+
m_compiledData = unit;
for (int i = 0, count = m_compiledData->objectCount(); i < count; ++i)
@@ -2397,6 +2402,15 @@ bool QQmlTypeData::loadFromSource()
return true;
}
+void QQmlTypeData::restoreIR(QQmlRefPointer<QV4::CompiledData::CompilationUnit> unit)
+{
+ m_document.reset(new QmlIR::Document(isDebugging()));
+ QmlIR::IRLoader loader(unit->data, m_document.data());
+ loader.load();
+ m_document->javaScriptCompilationUnit = unit;
+ continueLoadFromIR();
+}
+
void QQmlTypeData::continueLoadFromIR()
{
m_typeReferences.collectFromObjects(m_document->objects.constBegin(), m_document->objects.constEnd());
@@ -2602,7 +2616,7 @@ QQmlCompileError QQmlTypeData::buildTypeResolutionCaches(
QV4::CompiledData::ResolvedTypeReferenceMap *resolvedTypeCache
) const
{
- typeNameCache->adopt(new QQmlTypeNameCache);
+ typeNameCache->adopt(new QQmlTypeNameCache(m_importCache));
for (const QString &ns: m_namespaces)
(*typeNameCache)->add(ns);
@@ -2946,7 +2960,7 @@ void QQmlScriptBlob::done()
}
}
- m_scriptData->typeNameCache = new QQmlTypeNameCache();
+ m_scriptData->typeNameCache = new QQmlTypeNameCache(m_importCache);
QSet<QString> ns;
diff --git a/src/qml/qml/qqmltypeloader_p.h b/src/qml/qml/qqmltypeloader_p.h
index 14141db180..915b1bcc4c 100644
--- a/src/qml/qml/qqmltypeloader_p.h
+++ b/src/qml/qml/qqmltypeloader_p.h
@@ -218,6 +218,34 @@ private:
class QQmlTypeLoaderThread;
+class QQmlTypeLoaderQmldirContent
+{
+private:
+ friend class QQmlTypeLoader;
+ QQmlTypeLoaderQmldirContent();
+
+ void setContent(const QString &location, const QString &content);
+ void setError(const QQmlError &);
+
+public:
+ bool hasError() const;
+ QList<QQmlError> errors(const QString &uri) const;
+
+ QString typeNamespace() const;
+
+ QQmlDirComponents components() const;
+ QQmlDirScripts scripts() const;
+ QQmlDirPlugins plugins() const;
+
+ QString pluginLocation() const;
+
+ bool designerSupported() const;
+
+private:
+ QQmlDirParser m_parser;
+ QString m_location;
+};
+
class Q_AUTOTEST_EXPORT QQmlTypeLoader
{
Q_DECLARE_TR_FUNCTIONS(QQmlTypeLoader)
@@ -256,34 +284,6 @@ public:
QList<QQmlQmldirData *> m_qmldirs;
};
- class QmldirContent
- {
- private:
- friend class QQmlTypeLoader;
- QmldirContent();
-
- void setContent(const QString &location, const QString &content);
- void setError(const QQmlError &);
-
- public:
- bool hasError() const;
- QList<QQmlError> errors(const QString &uri) const;
-
- QString typeNamespace() const;
-
- QQmlDirComponents components() const;
- QQmlDirScripts scripts() const;
- QQmlDirPlugins plugins() const;
-
- QString pluginLocation() const;
-
- bool designerSupported() const;
-
- private:
- QQmlDirParser m_parser;
- QString m_location;
- };
-
QQmlTypeLoader(QQmlEngine *);
~QQmlTypeLoader();
@@ -298,7 +298,7 @@ public:
QString absoluteFilePath(const QString &path);
bool directoryExists(const QString &path);
- const QmldirContent *qmldirContent(const QString &filePath);
+ const QQmlTypeLoaderQmldirContent *qmldirContent(const QString &filePath);
void setQmldirContent(const QString &filePath, const QString &content);
void clearCache();
@@ -363,7 +363,7 @@ private:
typedef QHash<QUrl, QQmlQmldirData *> QmldirCache;
typedef QStringHash<bool> StringSet;
typedef QStringHash<StringSet*> ImportDirCache;
- typedef QStringHash<QmldirContent *> ImportQmlDirCache;
+ typedef QStringHash<QQmlTypeLoaderQmldirContent *> ImportQmlDirCache;
QQmlEngine *m_engine;
QQmlTypeLoaderThread *m_thread;
@@ -446,6 +446,7 @@ protected:
private:
bool tryLoadFromDiskCache();
bool loadFromSource();
+ void restoreIR(QQmlRefPointer<QV4::CompiledData::CompilationUnit> unit);
void continueLoadFromIR();
void resolveTypes();
QQmlCompileError buildTypeResolutionCaches(
diff --git a/src/qml/qml/qqmltypenamecache.cpp b/src/qml/qml/qqmltypenamecache.cpp
index c2098bc9a1..c8e2b92c29 100644
--- a/src/qml/qml/qqmltypenamecache.cpp
+++ b/src/qml/qml/qqmltypenamecache.cpp
@@ -43,7 +43,8 @@
QT_BEGIN_NAMESPACE
-QQmlTypeNameCache::QQmlTypeNameCache()
+QQmlTypeNameCache::QQmlTypeNameCache(const QQmlImports &importCache)
+ : m_imports(importCache)
{
}
@@ -70,6 +71,7 @@ void QQmlTypeNameCache::add(const QHashedString &name, int importedScriptIndex,
{
Import import;
import.scriptIndex = importedScriptIndex;
+ import.m_qualifier = name;
if (nameSpace.length() != 0) {
Import *i = m_namedImports.value(nameSpace);
@@ -94,6 +96,18 @@ QQmlTypeNameCache::Result QQmlTypeNameCache::query(const QHashedStringRef &name)
if (!result.isValid())
result = query(m_anonymousCompositeSingletons, name);
+ if (!result.isValid()) {
+ // Look up anonymous types from the imports of this document
+ QQmlImportNamespace *typeNamespace = 0;
+ QList<QQmlError> errors;
+ QQmlType *t = 0;
+ bool typeFound = m_imports.resolveType(name, &t, 0, 0, &typeNamespace, &errors);
+ if (typeFound) {
+ return Result(t);
+ }
+
+ }
+
return result;
}
@@ -109,6 +123,20 @@ QQmlTypeNameCache::Result QQmlTypeNameCache::query(const QHashedStringRef &name,
if (!result.isValid())
result = query(i->compositeSingletons, name);
+ if (!result.isValid()) {
+ // Look up types from the imports of this document
+ // ### it would be nice if QQmlImports allowed us to resolve a namespace
+ // first, and then types on it.
+ QString qualifiedTypeName = i->m_qualifier + QLatin1Char('.') + name.toString();
+ QQmlImportNamespace *typeNamespace = 0;
+ QList<QQmlError> errors;
+ QQmlType *t = 0;
+ bool typeFound = m_imports.resolveType(qualifiedTypeName, &t, 0, 0, &typeNamespace, &errors);
+ if (typeFound) {
+ return Result(t);
+ }
+ }
+
return result;
}
@@ -122,6 +150,19 @@ QQmlTypeNameCache::Result QQmlTypeNameCache::query(const QV4::String *name) cons
if (!result.isValid())
result = query(m_anonymousCompositeSingletons, name);
+ if (!result.isValid()) {
+ // Look up anonymous types from the imports of this document
+ QString typeName = name->toQStringNoThrow();
+ QQmlImportNamespace *typeNamespace = 0;
+ QList<QQmlError> errors;
+ QQmlType *t = 0;
+ bool typeFound = m_imports.resolveType(typeName, &t, 0, 0, &typeNamespace, &errors);
+ if (typeFound) {
+ return Result(t);
+ }
+
+ }
+
return result;
}
@@ -143,6 +184,20 @@ QQmlTypeNameCache::Result QQmlTypeNameCache::query(const QV4::String *name, cons
if (!r.isValid())
r = query(i->compositeSingletons, name);
+ if (!r.isValid()) {
+ // Look up types from the imports of this document
+ // ### it would be nice if QQmlImports allowed us to resolve a namespace
+ // first, and then types on it.
+ QString qualifiedTypeName = i->m_qualifier + QLatin1Char('.') + name->toQStringNoThrow();
+ QQmlImportNamespace *typeNamespace = 0;
+ QList<QQmlError> errors;
+ QQmlType *t = 0;
+ bool typeFound = m_imports.resolveType(qualifiedTypeName, &t, 0, 0, &typeNamespace, &errors);
+ if (typeFound) {
+ return Result(t);
+ }
+ }
+
return r;
}
diff --git a/src/qml/qml/qqmltypenamecache_p.h b/src/qml/qml/qqmltypenamecache_p.h
index 8a387bed5f..7cdcbe91b6 100644
--- a/src/qml/qml/qqmltypenamecache_p.h
+++ b/src/qml/qml/qqmltypenamecache_p.h
@@ -56,6 +56,7 @@
#include "qqmlmetatype_p.h"
#include <private/qhashedstring_p.h>
+#include <private/qqmlimport_p.h>
#include <QtCore/qvector.h>
@@ -66,7 +67,7 @@ class QQmlEngine;
class QQmlTypeNameCache : public QQmlRefCount
{
public:
- QQmlTypeNameCache();
+ QQmlTypeNameCache(const QQmlImports &imports);
virtual ~QQmlTypeNameCache();
inline bool isEmpty() const;
@@ -105,6 +106,9 @@ private:
// Or, imported compositeSingletons
QStringHash<QUrl> compositeSingletons;
+
+ // The qualifier of this import
+ QString m_qualifier;
};
template<typename Key>
@@ -112,6 +116,7 @@ private:
{
Import *i = imports.value(key);
if (i) {
+ Q_ASSERT(!i->m_qualifier.isEmpty());
if (i->scriptIndex != -1) {
return Result(i->scriptIndex);
} else {
@@ -151,6 +156,7 @@ private:
QMap<const Import *, QStringHash<Import> > m_namespacedImports;
QVector<QQmlTypeModuleVersion> m_anonymousImports;
QStringHash<QUrl> m_anonymousCompositeSingletons;
+ QQmlImports m_imports;
};
QQmlTypeNameCache::Result::Result()
diff --git a/src/qmldevtools/qmldevtools.pro b/src/qmldevtools/qmldevtools.pro
index acd5c9729b..ec5d73044f 100644
--- a/src/qmldevtools/qmldevtools.pro
+++ b/src/qmldevtools/qmldevtools.pro
@@ -18,5 +18,6 @@ include(../qml/parser/parser.pri)
include(../qml/jsruntime/jsruntime.pri)
include(../qml/compiler/compiler.pri)
include(../qml/memory/memory.pri)
+include(../qml/jit/jit.pri)
load(qt_module)
diff --git a/src/quick/items/qquickitemgrabresult.cpp b/src/quick/items/qquickitemgrabresult.cpp
index 07c2cb607c..12bcd43076 100644
--- a/src/quick/items/qquickitemgrabresult.cpp
+++ b/src/quick/items/qquickitemgrabresult.cpp
@@ -75,7 +75,7 @@ public:
void ensureImageInCache() const {
if (url.isEmpty() && !image.isNull()) {
- url.setScheme(QStringLiteral("ItemGrabber"));
+ url.setScheme(QQuickPixmap::itemGrabberScheme);
url.setPath(QVariant::fromValue(item.data()).toString());
static uint counter = 0;
url.setFragment(QString::number(++counter));
diff --git a/src/quick/items/qquicktext.cpp b/src/quick/items/qquicktext.cpp
index 965fb0102f..1720377046 100644
--- a/src/quick/items/qquicktext.cpp
+++ b/src/quick/items/qquicktext.cpp
@@ -74,7 +74,7 @@ Q_DECLARE_LOGGING_CATEGORY(DBG_HOVER_TRACE)
const QChar QQuickTextPrivate::elideChar = QChar(0x2026);
QQuickTextPrivate::QQuickTextPrivate()
- : elideLayout(0), textLine(0), lineWidth(0)
+ : fontInfo(font), elideLayout(0), textLine(0), lineWidth(0)
, color(0xFF000000), linkColor(0xFF0000FF), styleColor(0xFF000000)
, lineCount(1), multilengthEos(-1)
, elideMode(QQuickText::ElideNone), hAlign(QQuickText::AlignLeft), vAlign(QQuickText::AlignTop)
@@ -1018,6 +1018,17 @@ QRectF QQuickTextPrivate::setupTextLayout(qreal *const baseline)
implicitWidthValid = true;
implicitHeightValid = true;
+ QFontInfo scaledFontInfo(scaledFont);
+ if (fontInfo.weight() != scaledFontInfo.weight()
+ || fontInfo.pixelSize() != scaledFontInfo.pixelSize()
+ || fontInfo.italic() != scaledFontInfo.italic()
+ || !qFuzzyCompare(fontInfo.pointSizeF(), scaledFontInfo.pointSizeF())
+ || fontInfo.family() != scaledFontInfo.family()
+ || fontInfo.styleName() != scaledFontInfo.styleName()) {
+ fontInfo = scaledFontInfo;
+ emit q->fontInfoChanged();
+ }
+
if (eos != multilengthEos)
truncated = true;
@@ -2974,4 +2985,80 @@ void QQuickText::resetBottomPadding()
d->setBottomPadding(0, true);
}
+/*!
+ \qmlproperty string QtQuick::Text::fontInfo.family
+ \since 5.9
+
+ The family name of the font that has been resolved for the current font
+ and fontSizeMode.
+*/
+
+/*!
+ \qmlproperty string QtQuick::Text::fontInfo.styleName
+ \since 5.9
+
+ The style name of the font info that has been resolved for the current font
+ and fontSizeMode.
+*/
+
+/*!
+ \qmlproperty bool QtQuick::Text::fontInfo.bold
+ \since 5.9
+
+ The bold state of the font info that has been resolved for the current font
+ and fontSizeMode. This is true if the weight of the resolved font is bold or higher.
+*/
+
+/*!
+ \qmlproperty int QtQuick::Text::fontInfo.weight
+ \since 5.9
+
+ The weight of the font info that has been resolved for the current font
+ and fontSizeMode.
+*/
+
+/*!
+ \qmlproperty bool QtQuick::Text::fontInfo.italic
+ \since 5.9
+
+ The italic state of the font info that has been resolved for the current font
+ and fontSizeMode.
+*/
+
+/*!
+ \qmlproperty real QtQuick::Text::fontInfo.pointSize
+ \since 5.9
+
+ The pointSize of the font info that has been resolved for the current font
+ and fontSizeMode.
+*/
+
+/*!
+    \qmlproperty int QtQuick::Text::fontInfo.pixelSize
+ \since 5.9
+
+ The pixel size of the font info that has been resolved for the current font
+ and fontSizeMode.
+*/
+QJSValue QQuickText::fontInfo() const
+{
+ Q_D(const QQuickText);
+
+ QJSEngine *engine = qjsEngine(this);
+ if (!engine) {
+ qmlWarning(this) << "fontInfo: item has no JS engine";
+ return QJSValue();
+ }
+
+ QJSValue value = engine->newObject();
+ value.setProperty(QStringLiteral("family"), d->fontInfo.family());
+ value.setProperty(QStringLiteral("styleName"), d->fontInfo.styleName());
+ value.setProperty(QStringLiteral("bold"), d->fontInfo.bold());
+ value.setProperty(QStringLiteral("weight"), d->fontInfo.weight());
+ value.setProperty(QStringLiteral("italic"), d->fontInfo.italic());
+ value.setProperty(QStringLiteral("pointSize"), d->fontInfo.pointSizeF());
+ value.setProperty(QStringLiteral("pixelSize"), d->fontInfo.pixelSize());
+ return value;
+}
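
QML code would typically just bind to these sub-properties (fontInfo.family, fontInfo.pixelSize, ...). As a hedged sketch of reading the same value from C++ (function and variable names are invented), assuming textItem points to a Text element created by the engine:

    #include <QQmlProperty>
    #include <QJSValue>
    #include <QVariant>
    #include <QDebug>

    void dumpResolvedFont(QObject *textItem)
    {
        const QJSValue info = qvariant_cast<QJSValue>(
            QQmlProperty::read(textItem, QStringLiteral("fontInfo")));
        qDebug() << info.property(QStringLiteral("family")).toString()
                 << info.property(QStringLiteral("pixelSize")).toInt();
    }
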
+
QT_END_NAMESPACE
diff --git a/src/quick/items/qquicktext_p.h b/src/quick/items/qquicktext_p.h
index c3d3906e7e..b190738cfb 100644
--- a/src/quick/items/qquicktext_p.h
+++ b/src/quick/items/qquicktext_p.h
@@ -98,6 +98,8 @@ class Q_QUICK_PRIVATE_EXPORT QQuickText : public QQuickImplicitSizeItem
Q_PROPERTY(qreal rightPadding READ rightPadding WRITE setRightPadding RESET resetRightPadding NOTIFY rightPaddingChanged REVISION 6)
Q_PROPERTY(qreal bottomPadding READ bottomPadding WRITE setBottomPadding RESET resetBottomPadding NOTIFY bottomPaddingChanged REVISION 6)
+ Q_PROPERTY(QJSValue fontInfo READ fontInfo NOTIFY fontInfoChanged REVISION 9)
+
public:
QQuickText(QQuickItem *parent=0);
~QQuickText();
@@ -248,6 +250,8 @@ public:
void setBottomPadding(qreal padding);
void resetBottomPadding();
+ QJSValue fontInfo() const;
+
Q_SIGNALS:
void textChanged(const QString &text);
void linkActivated(const QString &link);
@@ -280,6 +284,7 @@ Q_SIGNALS:
Q_REVISION(6) void leftPaddingChanged();
Q_REVISION(6) void rightPaddingChanged();
Q_REVISION(6) void bottomPaddingChanged();
+ Q_REVISION(9) void fontInfoChanged();
protected:
QQuickText(QQuickTextPrivate &dd, QQuickItem *parent = 0);
diff --git a/src/quick/items/qquicktext_p_p.h b/src/quick/items/qquicktext_p_p.h
index 0669bcf115..6456750359 100644
--- a/src/quick/items/qquicktext_p_p.h
+++ b/src/quick/items/qquicktext_p_p.h
@@ -122,6 +122,7 @@ public:
QString text;
QFont font;
QFont sourceFont;
+ QFontInfo fontInfo;
QTextLayout layout;
QTextLayout *elideLayout;
diff --git a/src/quick/items/qquickwindow.cpp b/src/quick/items/qquickwindow.cpp
index 83aff5e07e..6897240fbd 100644
--- a/src/quick/items/qquickwindow.cpp
+++ b/src/quick/items/qquickwindow.cpp
@@ -454,8 +454,13 @@ void QQuickWindowPrivate::renderSceneGraph(const QSize &size)
fboId = renderTargetId;
renderer->setDeviceRect(rect);
renderer->setViewportRect(rect);
- renderer->setProjectionMatrixToRect(QRect(QPoint(0, 0), rect.size()));
- renderer->setDevicePixelRatio(1);
+ if (QQuickRenderControl::renderWindowFor(q)) {
+ renderer->setProjectionMatrixToRect(QRect(QPoint(0, 0), size));
+ renderer->setDevicePixelRatio(devicePixelRatio);
+ } else {
+ renderer->setProjectionMatrixToRect(QRect(QPoint(0, 0), rect.size()));
+ renderer->setDevicePixelRatio(1);
+ }
} else {
QRect rect(QPoint(0, 0), devicePixelRatio * size);
renderer->setDeviceRect(rect);
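The branch added above keeps the two coordinate spaces apart: the device and viewport rects are in physical pixels, while the projection rect stays in logical pixels, with the device pixel ratio bridging the two when rendering through QQuickRenderControl. A minimal sketch of that relationship, with a logical size and ratio chosen purely for illustration:

    #include <QRect>
    #include <QSize>
    #include <QDebug>

    int main()
    {
        const QSize logicalSize(800, 600);      // window size in logical pixels
        const qreal devicePixelRatio = 2.0;     // illustrative high-DPI factor

        // Physical pixels: what the device/viewport rect covers.
        const QRect deviceRect(QPoint(0, 0), logicalSize * devicePixelRatio);
        // Logical pixels: what the projection matrix maps item coordinates from.
        const QRect projectionRect(QPoint(0, 0), logicalSize);

        qDebug() << deviceRect << projectionRect;   // QRect(0,0 1600x1200) QRect(0,0 800x600)
        return 0;
    }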
diff --git a/src/quick/scenegraph/coreapi/qsgbatchrenderer_p.h b/src/quick/scenegraph/coreapi/qsgbatchrenderer_p.h
index 322192944b..2c0f8667e8 100644
--- a/src/quick/scenegraph/coreapi/qsgbatchrenderer_p.h
+++ b/src/quick/scenegraph/coreapi/qsgbatchrenderer_p.h
@@ -440,9 +440,7 @@ struct Batch
mutable uint uploadedThisFrame : 1; // solely for debugging purposes
Buffer vbo;
-#ifdef QSG_SEPARATE_INDEX_BUFFER
Buffer ibo;
-#endif
QDataBuffer<DrawSet> drawSets;
};
@@ -744,9 +742,7 @@ private:
ClipType m_currentClipType;
QDataBuffer<char> m_vertexUploadPool;
-#ifdef QSG_SEPARATE_INDEX_BUFFER
QDataBuffer<char> m_indexUploadPool;
-#endif
// For minimal OpenGL core profile support
QOpenGLVertexArrayObject *m_vao;
@@ -766,10 +762,7 @@ Batch *Renderer::newBatch()
m_batchPool.resize(size - 1);
} else {
b = new Batch();
- memset(&b->vbo, 0, sizeof(Buffer));
-#ifdef QSG_SEPARATE_INDEX_BUFFER
- memset(&b->ibo, 0, sizeof(Buffer));
-#endif
+ memset(&b->vbo, 0, sizeof(Buffer) * 2); // Clear VBO & IBO
}
b->init();
return b;
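The combined memset above clears both buffers with one call, which is only valid because ibo directly follows vbo inside Batch. A minimal standalone sketch of that layout assumption, using a simplified stand-in for the real Buffer and Batch types:

    #include <cstddef>
    #include <cstring>

    struct Buffer { unsigned id; int size; char *data; };   // simplified stand-in

    struct Batch {
        Buffer vbo;
        Buffer ibo;    // must immediately follow vbo for the combined clear to be valid
    };

    int main()
    {
        // Make the layout assumption behind the single memset explicit.
        static_assert(offsetof(Batch, ibo) == offsetof(Batch, vbo) + sizeof(Buffer),
                      "ibo must directly follow vbo");

        Batch b;
        std::memset(&b.vbo, 0, sizeof(Buffer) * 2);          // zeroes vbo and ibo together
        return 0;
    }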
diff --git a/src/quick/scenegraph/qsgcontext_p.h b/src/quick/scenegraph/qsgcontext_p.h
index 6b9db105e7..2f5d5790ee 100644
--- a/src/quick/scenegraph/qsgcontext_p.h
+++ b/src/quick/scenegraph/qsgcontext_p.h
@@ -88,6 +88,7 @@ class QSGRectangleNode;
class QSGImageNode;
class QSGNinePatchNode;
class QSGSpriteNode;
+class QSGRenderContext;
Q_DECLARE_LOGGING_CATEGORY(QSG_LOG_TIME_RENDERLOOP)
Q_DECLARE_LOGGING_CATEGORY(QSG_LOG_TIME_COMPILATION)
@@ -98,6 +99,54 @@ Q_DECLARE_LOGGING_CATEGORY(QSG_LOG_TIME_RENDERER)
Q_DECLARE_LOGGING_CATEGORY(QSG_LOG_INFO)
Q_DECLARE_LOGGING_CATEGORY(QSG_LOG_RENDERLOOP)
+class Q_QUICK_PRIVATE_EXPORT QSGContext : public QObject
+{
+ Q_OBJECT
+
+public:
+ enum AntialiasingMethod {
+ UndecidedAntialiasing,
+ VertexAntialiasing,
+ MsaaAntialiasing
+ };
+
+ explicit QSGContext(QObject *parent = 0);
+ virtual ~QSGContext();
+
+ virtual void renderContextInitialized(QSGRenderContext *renderContext);
+ virtual void renderContextInvalidated(QSGRenderContext *renderContext);
+ virtual QSGRenderContext *createRenderContext() = 0;
+
+ QSGInternalRectangleNode *createInternalRectangleNode(const QRectF &rect, const QColor &c);
+ virtual QSGInternalRectangleNode *createInternalRectangleNode() = 0;
+ virtual QSGInternalImageNode *createInternalImageNode() = 0;
+ virtual QSGPainterNode *createPainterNode(QQuickPaintedItem *item) = 0;
+ virtual QSGGlyphNode *createGlyphNode(QSGRenderContext *rc, bool preferNativeGlyphNode) = 0;
+ virtual QSGLayer *createLayer(QSGRenderContext *renderContext) = 0;
+ virtual QSGGuiThreadShaderEffectManager *createGuiThreadShaderEffectManager();
+ virtual QSGShaderEffectNode *createShaderEffectNode(QSGRenderContext *renderContext,
+ QSGGuiThreadShaderEffectManager *mgr);
+#if QT_CONFIG(quick_sprite)
+ virtual QSGSpriteNode *createSpriteNode() = 0;
+#endif
+ virtual QAnimationDriver *createAnimationDriver(QObject *parent);
+
+ virtual QSize minimumFBOSize() const;
+ virtual QSurfaceFormat defaultSurfaceFormat() const = 0;
+
+ virtual QSGRendererInterface *rendererInterface(QSGRenderContext *renderContext);
+
+ virtual QSGRectangleNode *createRectangleNode() = 0;
+ virtual QSGImageNode *createImageNode() = 0;
+ virtual QSGNinePatchNode *createNinePatchNode() = 0;
+
+ static QSGContext *createDefaultContext();
+ static QQuickTextureFactory *createTextureFactoryFromImage(const QImage &image);
+ static QSGRenderLoop *createWindowManager();
+
+ static void setBackend(const QString &backend);
+};
+
class Q_QUICK_PRIVATE_EXPORT QSGRenderContext : public QObject
{
Q_OBJECT
@@ -150,55 +199,6 @@ protected:
QSet<QFontEngine *> m_fontEnginesToClean;
};
-
-class Q_QUICK_PRIVATE_EXPORT QSGContext : public QObject
-{
- Q_OBJECT
-
-public:
- enum AntialiasingMethod {
- UndecidedAntialiasing,
- VertexAntialiasing,
- MsaaAntialiasing
- };
-
- explicit QSGContext(QObject *parent = 0);
- virtual ~QSGContext();
-
- virtual void renderContextInitialized(QSGRenderContext *renderContext);
- virtual void renderContextInvalidated(QSGRenderContext *renderContext);
- virtual QSGRenderContext *createRenderContext() = 0;
-
- QSGInternalRectangleNode *createInternalRectangleNode(const QRectF &rect, const QColor &c);
- virtual QSGInternalRectangleNode *createInternalRectangleNode() = 0;
- virtual QSGInternalImageNode *createInternalImageNode() = 0;
- virtual QSGPainterNode *createPainterNode(QQuickPaintedItem *item) = 0;
- virtual QSGGlyphNode *createGlyphNode(QSGRenderContext *rc, bool preferNativeGlyphNode) = 0;
- virtual QSGLayer *createLayer(QSGRenderContext *renderContext) = 0;
- virtual QSGGuiThreadShaderEffectManager *createGuiThreadShaderEffectManager();
- virtual QSGShaderEffectNode *createShaderEffectNode(QSGRenderContext *renderContext,
- QSGGuiThreadShaderEffectManager *mgr);
-#if QT_CONFIG(quick_sprite)
- virtual QSGSpriteNode *createSpriteNode() = 0;
-#endif
- virtual QAnimationDriver *createAnimationDriver(QObject *parent);
-
- virtual QSize minimumFBOSize() const;
- virtual QSurfaceFormat defaultSurfaceFormat() const = 0;
-
- virtual QSGRendererInterface *rendererInterface(QSGRenderContext *renderContext);
-
- virtual QSGRectangleNode *createRectangleNode() = 0;
- virtual QSGImageNode *createImageNode() = 0;
- virtual QSGNinePatchNode *createNinePatchNode() = 0;
-
- static QSGContext *createDefaultContext();
- static QQuickTextureFactory *createTextureFactoryFromImage(const QImage &image);
- static QSGRenderLoop *createWindowManager();
-
- static void setBackend(const QString &backend);
-};
-
QT_END_NAMESPACE
#endif // QSGCONTEXT_H
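QSGContext, now declared ahead of QSGRenderContext in this header, is the factory interface a scene graph adaptation implements to supply its own node and render-context types. A minimal, illustrative skeleton of such a backend (the class name is hypothetical, it only compiles inside a Qt build tree against the private qsgcontext_p.h header, and the remaining pure virtuals would still have to be overridden before it could be instantiated):

    #include <private/qsgcontext_p.h>
    #include <QSurfaceFormat>

    // Hypothetical custom backend; only a few representative overrides are shown,
    // each returning a placeholder where a real adaptation returns its own types.
    class MyCustomContext : public QSGContext
    {
    public:
        explicit MyCustomContext(QObject *parent = nullptr) : QSGContext(parent) {}

        QSGRenderContext *createRenderContext() override { return nullptr; }
        QSGInternalRectangleNode *createInternalRectangleNode() override { return nullptr; }
        QSGRectangleNode *createRectangleNode() override { return nullptr; }
        QSurfaceFormat defaultSurfaceFormat() const override { return QSurfaceFormat(); }
        // createInternalImageNode(), createPainterNode(), createGlyphNode(),
        // createLayer(), createImageNode(), createNinePatchNode(), ... omitted.
    };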
diff --git a/src/quick/scenegraph/scenegraph.pri b/src/quick/scenegraph/scenegraph.pri
index edf4aa08c5..38c3b8dd85 100644
--- a/src/quick/scenegraph/scenegraph.pri
+++ b/src/quick/scenegraph/scenegraph.pri
@@ -1,4 +1,4 @@
-# DEFINES += QSG_SEPARATE_INDEX_BUFFER
+DEFINES += QSG_SEPARATE_INDEX_BUFFER
# DEFINES += QSG_DISTANCEFIELD_CACHE_DEBUG
# Core API
diff --git a/src/quick/util/qquickpixmapcache.cpp b/src/quick/util/qquickpixmapcache.cpp
index 1c6b2afb54..be6d4d18bd 100644
--- a/src/quick/util/qquickpixmapcache.cpp
+++ b/src/quick/util/qquickpixmapcache.cpp
@@ -84,6 +84,7 @@
QT_BEGIN_NAMESPACE
+const QLatin1String QQuickPixmap::itemGrabberScheme = QLatin1String("itemgrabber");
#ifndef QT_NO_DEBUG
static const bool qsg_leak_check = !qEnvironmentVariableIsEmpty("QML_LEAK_CHECK");
@@ -1462,8 +1463,15 @@ void QQuickPixmap::load(QQmlEngine *engine, const QUrl &url, const QSize &reques
QHash<QQuickPixmapKey, QQuickPixmapData *>::Iterator iter = store->m_cache.end();
// If Cache is disabled, the pixmap will always be loaded, even if there is an existing
- // cached version.
- if (options & QQuickPixmap::Cache)
+ // cached version. Unless it's an itemgrabber url, since the cache is used to pass
+ // the result between QQuickItemGrabResult and QQuickImage.
+ if (url.scheme() == itemGrabberScheme) {
+ QSize dummy;
+ if (requestSize != dummy)
+ qWarning() << "Ignoring sourceSize request for image url that came from grabToImage. Use the targetSize parameter of the grabToImage() function instead.";
+ const QQuickPixmapKey grabberKey = { &url, &dummy, QQuickImageProviderOptions() };
+ iter = store->m_cache.find(grabberKey);
+ } else if (options & QQuickPixmap::Cache)
iter = store->m_cache.find(key);
if (iter == store->m_cache.end()) {
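The warning added in this hunk steers callers toward grabToImage()'s targetSize argument instead of Image.sourceSize, because the itemgrabber cache lookup deliberately ignores the requested size. A minimal C++ sketch of requesting a scaled grab up front (the item pointer and file name are illustrative; the equivalent targetSize parameter exists on the QML grabToImage() method):

    #include <QQuickItem>
    #include <QQuickItemGrabResult>
    #include <QSharedPointer>

    // Grab 'item' scaled to 50x50 up front instead of rescaling the result later.
    void grabScaled(QQuickItem *item)
    {
        QSharedPointer<QQuickItemGrabResult> result = item->grabToImage(QSize(50, 50));
        if (result.isNull())
            return;                                       // item could not be grabbed
        QObject::connect(result.data(), &QQuickItemGrabResult::ready, [result]() {
            result->saveToFile(QStringLiteral("grab.png"));   // illustrative file name
        });
    }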
diff --git a/src/quick/util/qquickpixmapcache_p.h b/src/quick/util/qquickpixmapcache_p.h
index f7cdfa7d07..a867771755 100644
--- a/src/quick/util/qquickpixmapcache_p.h
+++ b/src/quick/util/qquickpixmapcache_p.h
@@ -177,6 +177,8 @@ public:
static void purgeCache();
static bool isCached(const QUrl &url, const QSize &requestSize);
+ static const QLatin1String itemGrabberScheme;
+
private:
Q_DISABLE_COPY(QQuickPixmap)
QQuickPixmapData *d;
diff --git a/src/src.pro b/src/src.pro
index 21925c9330..c2a58c3757 100644
--- a/src/src.pro
+++ b/src/src.pro
@@ -21,5 +21,3 @@ SUBDIRS += \
qmldevtools
qtConfig(localserver):!contains(QT_CONFIG, no-qml-debug): SUBDIRS += qmldebug
-
-qmldevtools.CONFIG = host_build
diff --git a/tests/auto/auto.pro b/tests/auto/auto.pro
index f25742fb14..1e80f1bf65 100644
--- a/tests/auto/auto.pro
+++ b/tests/auto/auto.pro
@@ -18,6 +18,4 @@ qtHaveModule(gui):qtConfig(opengl(es1|es2)?) {
# console applications not supported
uikit: SUBDIRS -= qmltest
-qmldevtools.CONFIG = host_build
-
installed_cmake.depends = cmake
diff --git a/tests/auto/qml/qml.pro b/tests/auto/qml/qml.pro
index 68a2eace19..04e4b04114 100644
--- a/tests/auto/qml/qml.pro
+++ b/tests/auto/qml/qml.pro
@@ -73,7 +73,7 @@ qtHaveModule(widgets) {
SUBDIRS += $$PUBLICTESTS \
qqmlextensionplugin
SUBDIRS += $$METATYPETESTS
-!uikit:!winrt { # no QProcess on uikit/winrt
+qtConfig(process) {
!contains(QT_CONFIG, no-qml-debug): SUBDIRS += debugger
SUBDIRS += qmllint qmlplugindump
}
diff --git a/tests/auto/qml/qqmlecmascript/tst_qqmlecmascript.cpp b/tests/auto/qml/qqmlecmascript/tst_qqmlecmascript.cpp
index 89dac33671..1f0248c258 100644
--- a/tests/auto/qml/qqmlecmascript/tst_qqmlecmascript.cpp
+++ b/tests/auto/qml/qqmlecmascript/tst_qqmlecmascript.cpp
@@ -1,5 +1,6 @@
/****************************************************************************
**
+** Copyright (C) 2017 Crimson AS <info@crimson.no>
** Copyright (C) 2016 The Qt Company Ltd.
** Contact: https://www.qt.io/licensing/
**
@@ -333,6 +334,8 @@ private slots:
void stringify_qtbug_50592();
void instanceof_data();
void instanceof();
+ void constkw_data();
+ void constkw();
private:
// static void propertyVarWeakRefCallback(v8::Persistent<v8::Value> object, void* parameter);
@@ -8183,6 +8186,60 @@ void tst_qqmlecmascript::instanceof()
}
}
+void tst_qqmlecmascript::constkw_data()
+{
+ QTest::addColumn<QString>("sourceCode");
+ QTest::addColumn<bool>("exceptionExpected");
+ QTest::addColumn<QVariant>("expectedValue");
+
+ QTest::newRow("simpleconst")
+ << "const v = 5\n"
+ "v\n"
+ << false
+ << QVariant(5);
+ QTest::newRow("twoconst")
+ << "const v = 5, i = 10\n"
+ "v + i\n"
+ << false
+ << QVariant(15);
+ QTest::newRow("constandvar")
+ << "const v = 5\n"
+ "var i = 20\n"
+ "v + i\n"
+ << false
+ << QVariant(25);
+
+ // error cases
+ QTest::newRow("const-no-initializer")
+ << "const v\n"
+ << true
+ << QVariant("SyntaxError: Missing initializer in const declaration");
+ QTest::newRow("const-no-initializer-comma")
+ << "const v = 1, i\n"
+ << true
+ << QVariant("SyntaxError: Missing initializer in const declaration");
+}
+
+void tst_qqmlecmascript::constkw()
+{
+ QFETCH(QString, sourceCode);
+ QFETCH(bool, exceptionExpected);
+ QFETCH(QVariant, expectedValue);
+
+ QJSEngine engine;
+ QJSValue ret = engine.evaluate(sourceCode);
+
+ if (!exceptionExpected) {
+ QVERIFY2(!ret.isError(), qPrintable(ret.toString()));
+ QCOMPARE(ret.toVariant(), expectedValue);
+ } else {
+ QVERIFY2(ret.isError(), qPrintable(ret.toString()));
+ QCOMPARE(ret.toString(), expectedValue.toString());
+ }
+}
+
QTEST_MAIN(tst_qqmlecmascript)
#include "tst_qqmlecmascript.moc"
diff --git a/tests/auto/qml/qqmllanguage/data/compositeTypeByName_anon_qmldir.qml b/tests/auto/qml/qqmllanguage/data/compositeTypeByName_anon_qmldir.qml
new file mode 100644
index 0000000000..5ffdc26096
--- /dev/null
+++ b/tests/auto/qml/qqmllanguage/data/compositeTypeByName_anon_qmldir.qml
@@ -0,0 +1,9 @@
+import QtQuick 2.6
+import "simpleimportByName"
+
+Item {
+ Component.onCompleted: {
+ console.warn(SimpleType)
+ }
+}
+
diff --git a/tests/auto/qml/qqmllanguage/data/compositeTypeByName_named_qmldir.qml b/tests/auto/qml/qqmllanguage/data/compositeTypeByName_named_qmldir.qml
new file mode 100644
index 0000000000..c446eae84c
--- /dev/null
+++ b/tests/auto/qml/qqmllanguage/data/compositeTypeByName_named_qmldir.qml
@@ -0,0 +1,9 @@
+import QtQuick 2.6
+import "simpleimportByName" as ImportName
+
+Item {
+ Component.onCompleted: {
+ console.warn(ImportName.SimpleType)
+ }
+}
+
diff --git a/tests/auto/qml/qqmllanguage/data/quickTypeByName_anon.qml b/tests/auto/qml/qqmllanguage/data/quickTypeByName_anon.qml
new file mode 100644
index 0000000000..abe750db33
--- /dev/null
+++ b/tests/auto/qml/qqmllanguage/data/quickTypeByName_anon.qml
@@ -0,0 +1,8 @@
+import QtQuick 2.6
+
+Item {
+ Component.onCompleted: {
+ console.warn(Item)
+ }
+}
+
diff --git a/tests/auto/qml/qqmllanguage/data/quickTypeByName_named.qml b/tests/auto/qml/qqmllanguage/data/quickTypeByName_named.qml
new file mode 100644
index 0000000000..397d4f42f0
--- /dev/null
+++ b/tests/auto/qml/qqmllanguage/data/quickTypeByName_named.qml
@@ -0,0 +1,8 @@
+import QtQuick 2.6 as Quick
+
+Quick.Item {
+ Quick.Component.onCompleted: {
+ console.warn(Quick.Item)
+ }
+}
+
diff --git a/tests/auto/qml/qqmllanguage/data/simpleimportByName/SimpleType.qml b/tests/auto/qml/qqmllanguage/data/simpleimportByName/SimpleType.qml
new file mode 100644
index 0000000000..4772dde8f0
--- /dev/null
+++ b/tests/auto/qml/qqmllanguage/data/simpleimportByName/SimpleType.qml
@@ -0,0 +1,4 @@
+import QtQuick 2.6
+
+MouseArea {
+}
diff --git a/tests/auto/qml/qqmllanguage/data/simpleimportByName/qmldir b/tests/auto/qml/qqmllanguage/data/simpleimportByName/qmldir
new file mode 100644
index 0000000000..80df37d0e6
--- /dev/null
+++ b/tests/auto/qml/qqmllanguage/data/simpleimportByName/qmldir
@@ -0,0 +1 @@
+SimpleType 1.0 SimpleType.qml
diff --git a/tests/auto/qml/qqmllanguage/tst_qqmllanguage.cpp b/tests/auto/qml/qqmllanguage/tst_qqmllanguage.cpp
index f586f7d429..c0500afddd 100644
--- a/tests/auto/qml/qqmllanguage/tst_qqmllanguage.cpp
+++ b/tests/auto/qml/qqmllanguage/tst_qqmllanguage.cpp
@@ -257,6 +257,9 @@ private slots:
void defaultListProperty();
void namespacedPropertyTypes();
+ void qmlTypeCanBeResolvedByName_data();
+ void qmlTypeCanBeResolvedByName();
+
private:
QQmlEngine engine;
QStringList defaultImportPathList;
@@ -4250,6 +4253,32 @@ void tst_qqmllanguage::namespacedPropertyTypes()
QVERIFY(!o.isNull());
}
+void tst_qqmllanguage::qmlTypeCanBeResolvedByName_data()
+{
+ QTest::addColumn<QUrl>("componentUrl");
+
+ // Built-in C++ types
+ QTest::newRow("C++ - Anonymous") << testFileUrl("quickTypeByName_anon.qml");
+ QTest::newRow("C++ - Named") << testFileUrl("quickTypeByName_named.qml");
+
+ // Composite types with a qmldir
+ QTest::newRow("QML - Anonymous - qmldir") << testFileUrl("compositeTypeByName_anon_qmldir.qml");
+ QTest::newRow("QML - Named - qmldir") << testFileUrl("compositeTypeByName_named_qmldir.qml");
+}
+
+void tst_qqmllanguage::qmlTypeCanBeResolvedByName()
+{
+ QFETCH(QUrl, componentUrl);
+
+ QQmlEngine engine;
+ QQmlComponent component(&engine, componentUrl);
+ VERIFY_ERRORS(0);
+ QTest::ignoreMessage(QtMsgType::QtWarningMsg, "[object Object]"); // a bit crude, but it will do
+
+ QScopedPointer<QObject> o(component.create());
+ QVERIFY(!o.isNull());
+}
+
QTEST_MAIN(tst_qqmllanguage)
#include "tst_qqmllanguage.moc"
diff --git a/tests/auto/qmltest/BLACKLIST b/tests/auto/qmltest/BLACKLIST
index fd796fcdb4..c38347b42a 100644
--- a/tests/auto/qmltest/BLACKLIST
+++ b/tests/auto/qmltest/BLACKLIST
@@ -9,5 +9,3 @@ linux
linux
[ListView::test_listInteractiveCurrentIndexEnforce]
linux
-[mouserelease::test_mouseDrag]
-rhel-7.2
diff --git a/tests/auto/qmltest/events/tst_drag.qml b/tests/auto/qmltest/events/tst_drag.qml
index ffdc94167c..ae77247a41 100644
--- a/tests/auto/qmltest/events/tst_drag.qml
+++ b/tests/auto/qmltest/events/tst_drag.qml
@@ -117,9 +117,9 @@ Rectangle{
name:"mouserelease"
when:windowShown
function test_mouseDrag() {
- mouseDrag(container, 10, 10, 20, 30);
- compare(container.x, 20 - util.dragThreshold - 1);
- compare(container.y, 30 - util.dragThreshold - 1);
+ mouseDrag(container, 10, 10, util.dragThreshold * 2, util.dragThreshold * 3);
+ compare(container.x, util.dragThreshold - 1);
+ compare(container.y, util.dragThreshold * 2 - 1);
}
function test_doSomethingWhileDragging() {
diff --git a/tests/auto/qmltest/itemgrabber/tst_itemgrabber.qml b/tests/auto/qmltest/itemgrabber/tst_itemgrabber.qml
index 022e98a202..a80814d6de 100644
--- a/tests/auto/qmltest/itemgrabber/tst_itemgrabber.qml
+++ b/tests/auto/qmltest/itemgrabber/tst_itemgrabber.qml
@@ -37,8 +37,9 @@ Item {
TestCase {
id: testCase
name: "item-grabber"
- when: imageOnDisk.ready && imageOnDiskSmall.ready && imageInCache.ready && imageInCacheSmall.ready
- function test_endresult() {
+ when: imageOnDisk.ready && imageOnDiskSmall.ready
+
+ function test_endresult_disk() {
var image = grabImage(root);
// imageOnDisk at (0, 0) - (100x100)
@@ -52,6 +53,40 @@ Item {
compare(imageOnDiskSmall.height, 50);
verify(image.pixel(100, 0) === Qt.rgba(1, 0, 0, 1));
verify(image.pixel(149, 49) === Qt.rgba(0, 0, 1, 1));
+ }
+
+ function test_endresult_cache_data() {
+ return [
+ { cache: true, sourceSize: Qt.size(-1, -1), fillMode: Image.Stretch },
+ { cache: true, sourceSize: Qt.size(-1, -1), fillMode: Image.PreserveAspectFit },
+ { cache: true, sourceSize: Qt.size(-1, -1), fillMode: Image.PreserveAspectCrop },
+ { cache: true, sourceSize: Qt.size(10, 10), fillMode: Image.Stretch },
+ { cache: true, sourceSize: Qt.size(10, 10), fillMode: Image.PreserveAspectFit },
+ { cache: true, sourceSize: Qt.size(10, 10), fillMode: Image.PreserveAspectCrop },
+ { cache: false, sourceSize: Qt.size(-1, -1), fillMode: Image.Stretch },
+ { cache: false, sourceSize: Qt.size(-1, -1), fillMode: Image.PreserveAspectFit },
+ { cache: false, sourceSize: Qt.size(-1, -1), fillMode: Image.PreserveAspectCrop },
+ { cache: false, sourceSize: Qt.size(10, 10), fillMode: Image.Stretch },
+ { cache: false, sourceSize: Qt.size(10, 10), fillMode: Image.PreserveAspectFit },
+ { cache: false, sourceSize: Qt.size(10, 10), fillMode: Image.PreserveAspectCrop },
+ ];
+ }
+
+ function test_endresult_cache(data) {
+ imageInCache.cache = data.cache;
+ imageInCache.sourceSize = data.sourceSize;
+ imageInCache.fillMode = data.fillMode;
+ imageInCacheSmall.cache = data.cache;
+ imageInCacheSmall.sourceSize = data.sourceSize;
+ imageInCacheSmall.fillMode = data.fillMode;
+
+ box.grabToImage(imageInCache.handleGrab);
+ box.grabToImage(imageInCacheSmall.handleGrab, Qt.size(50, 50));
+
+ tryCompare(imageInCache, "ready", true);
+ tryCompare(imageInCacheSmall, "ready", true);
+
+ var image = grabImage(root);
// imageInCache at (0, 100) - 100x100
compare(imageInCache.width, 100);
@@ -72,8 +107,6 @@ Item {
onWindowShownChanged: {
box.grabToImage(imageOnDisk.handleGrab);
box.grabToImage(imageOnDiskSmall.handleGrab, Qt.size(50, 50));
- box.grabToImage(imageInCache.handleGrab);
- box.grabToImage(imageInCacheSmall.handleGrab, Qt.size(50, 50));
}
}
diff --git a/tests/auto/quick/qquicktext/data/fontInfo.qml b/tests/auto/quick/qquicktext/data/fontInfo.qml
new file mode 100644
index 0000000000..25f924029f
--- /dev/null
+++ b/tests/auto/quick/qquicktext/data/fontInfo.qml
@@ -0,0 +1,24 @@
+import QtQuick 2.9
+
+Item {
+ Text {
+ id: main
+ objectName: "main"
+ width: 500
+ height: 500
+ text: "Meaningless text"
+ font.pixelSize: 1000
+ fontSizeMode: Text.Fit
+ }
+
+ Text {
+ objectName: "copy"
+ text: main.text
+ width: main.width
+ height: main.height
+
+ font.family: main.fontInfo.family
+ font.pixelSize: main.fontInfo.pixelSize
+ }
+}
+
diff --git a/tests/auto/quick/qquicktext/tst_qquicktext.cpp b/tests/auto/quick/qquicktext/tst_qquicktext.cpp
index 034ea4aec8..f741062d42 100644
--- a/tests/auto/quick/qquicktext/tst_qquicktext.cpp
+++ b/tests/auto/quick/qquicktext/tst_qquicktext.cpp
@@ -154,6 +154,8 @@ private slots:
void hAlignWidthDependsOnImplicitWidth_data();
void hAlignWidthDependsOnImplicitWidth();
+ void fontInfo();
+
private:
QStringList standard;
QStringList richText;
@@ -4253,6 +4255,23 @@ void tst_qquicktext::hAlignWidthDependsOnImplicitWidth()
QCOMPARE(numberOfNonWhitePixels(0, rectX - 1, image), 0);
}
+void tst_qquicktext::fontInfo()
+{
+ QQmlComponent component(&engine, testFile("fontInfo.qml"));
+
+ QScopedPointer<QObject> object(component.create());
+ QObject *root = object.data();
+
+ QQuickText *main = root->findChild<QQuickText *>("main");
+ QVERIFY(main);
+ QCOMPARE(main->font().pixelSize(), 1000);
+
+ QQuickText *copy = root->findChild<QQuickText *>("copy");
+ QVERIFY(copy);
+ QCOMPARE(copy->font().family(), QFontInfo(QFont()).family());
+ QVERIFY(copy->font().pixelSize() < 1000);
+}
+
QTEST_MAIN(tst_qquicktext)
#include "tst_qquicktext.moc"
diff --git a/tools/qmlcachegen/qmlcache.prf b/tools/qmlcachegen/qmlcache.prf
new file mode 100644
index 0000000000..fed9f0d2f3
--- /dev/null
+++ b/tools/qmlcachegen/qmlcache.prf
@@ -0,0 +1,12 @@
+qtPrepareTool(QML_CACHEGEN, qmlcachegen)
+
+!isEmpty(QT_TARGET_ARCH):QML_CACHEGEN_ARCH=$$QT_TARGET_ARCH
+else:QML_CACHEGEN_ARCH=$$QT_ARCH
+
+qmlcachegen.input = QML_FILES
+qmlcachegen.output = ${QMAKE_FILE_IN}c
+qmlcachegen.commands = $$QML_CACHEGEN --target-architecture=$$QML_CACHEGEN_ARCH ${QMAKE_FILE_IN}
+qmlcachegen.name = Generate QML Cache ${QMAKE_FILE_IN}
+qmlcachegen.variable_out = AUX_QML_FILES
+
+QMAKE_EXTRA_COMPILERS += qmlcachegen
diff --git a/tools/qmlcachegen/qmlcachegen.cpp b/tools/qmlcachegen/qmlcachegen.cpp
new file mode 100644
index 0000000000..4b902eda0f
--- /dev/null
+++ b/tools/qmlcachegen/qmlcachegen.cpp
@@ -0,0 +1,309 @@
+/****************************************************************************
+**
+** Copyright (C) 2016 The Qt Company Ltd.
+** Contact: https://www.qt.io/licensing/
+**
+** This file is part of the QtQml module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:GPL-EXCEPT$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and The Qt Company. For licensing terms
+** and conditions see https://www.qt.io/terms-conditions. For further
+** information use the contact form at https://www.qt.io/contact-us.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3 as published by the Free Software
+** Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
+** included in the packaging of this file. Please review the following
+** information to ensure the GNU General Public License requirements will
+** be met: https://www.gnu.org/licenses/gpl-3.0.html.
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include <QCoreApplication>
+#include <QStringList>
+#include <QCommandLineParser>
+#include <QFile>
+#include <QFileInfo>
+#include <QDateTime>
+
+#include <private/qqmlirbuilder_p.h>
+#include <private/qv4isel_moth_p.h>
+#include <private/qqmljsparser_p.h>
+
+QT_BEGIN_NAMESPACE
+extern Q_CORE_EXPORT QBasicAtomicInt qt_qhash_seed;
+
+namespace QV4 { namespace JIT {
+Q_QML_EXPORT QV4::EvalISelFactory *createISelForArchitecture(const QString &architecture);
+} }
+
+QT_END_NAMESPACE
+
+struct Error
+{
+ QString message;
+ void print();
+ Error augment(const QString &contextErrorMessage) const;
+};
+
+void Error::print()
+{
+ fprintf(stderr, "%s\n", qPrintable(message));
+}
+
+Error Error::augment(const QString &contextErrorMessage) const
+{
+ Error augmented;
+ augmented.message = contextErrorMessage + message;
+ return augmented;
+}
+
+QString diagnosticErrorMessage(const QString &fileName, const QQmlJS::DiagnosticMessage &m)
+{
+ QString message;
+ message = fileName + QLatin1Char(':') + QString::number(m.loc.startLine) + QLatin1Char(':');
+ if (m.loc.startColumn > 0)
+ message += QString::number(m.loc.startColumn) + QLatin1Char(':');
+
+ if (m.isError())
+ message += QLatin1String(" error: ");
+ else
+ message += QLatin1String(" warning: ");
+ message += m.message;
+ return message;
+}
+
+static bool compileQmlFile(const QString &inputFileName, QV4::EvalISelFactory *iselFactory, Error *error)
+{
+ QmlIR::Document irDocument(/*debugMode*/false);
+
+ QString sourceCode;
+ {
+ QFile f(inputFileName);
+ if (!f.open(QIODevice::ReadOnly)) {
+ error->message = QLatin1String("Error opening ") + inputFileName + QLatin1Char(':') + f.errorString();
+ return false;
+ }
+ sourceCode = QString::fromUtf8(f.readAll());
+ if (f.error() != QFileDevice::NoError) {
+ error->message = QLatin1String("Error reading from ") + inputFileName + QLatin1Char(':') + f.errorString();
+ return false;
+ }
+ irDocument.jsModule.sourceTimeStamp = QFileInfo(f).lastModified().toMSecsSinceEpoch();
+ }
+
+ {
+ QSet<QString> illegalNames; // ####
+ QmlIR::IRBuilder irBuilder(illegalNames);
+ if (!irBuilder.generateFromQml(sourceCode, inputFileName, &irDocument)) {
+ for (const QQmlJS::DiagnosticMessage &parseError: qAsConst(irBuilder.errors)) {
+ if (!error->message.isEmpty())
+ error->message += QLatin1Char('\n');
+ error->message += diagnosticErrorMessage(inputFileName, parseError);
+ }
+ return false;
+ }
+ }
+
+ {
+ QmlIR::JSCodeGen v4CodeGen(inputFileName, irDocument.code, &irDocument.jsModule, &irDocument.jsParserEngine, irDocument.program, /*import cache*/0, &irDocument.jsGenerator.stringTable);
+ for (QmlIR::Object *object: qAsConst(irDocument.objects)) {
+ if (object->functionsAndExpressions->count == 0)
+ continue;
+ QList<QmlIR::CompiledFunctionOrExpression> functionsToCompile;
+ for (QmlIR::CompiledFunctionOrExpression *foe = object->functionsAndExpressions->first; foe; foe = foe->next) {
+ foe->disableAcceleratedLookups = true;
+ functionsToCompile << *foe;
+ }
+ const QVector<int> runtimeFunctionIndices = v4CodeGen.generateJSCodeForFunctionsAndBindings(functionsToCompile);
+ QList<QQmlJS::DiagnosticMessage> jsErrors = v4CodeGen.errors();
+ if (!jsErrors.isEmpty()) {
+ for (const QQmlJS::DiagnosticMessage &e: qAsConst(jsErrors)) {
+ if (!error->message.isEmpty())
+ error->message += QLatin1Char('\n');
+ error->message += diagnosticErrorMessage(inputFileName, e);
+ }
+ return false;
+ }
+
+ QQmlJS::MemoryPool *pool = irDocument.jsParserEngine.pool();
+ object->runtimeFunctionIndices.allocate(pool, runtimeFunctionIndices);
+ }
+
+ QmlIR::QmlUnitGenerator generator;
+
+ // ### translation binding simplification
+
+ QScopedPointer<QV4::EvalInstructionSelection> isel(iselFactory->create(/*engine*/nullptr, /*executable allocator*/nullptr, &irDocument.jsModule, &irDocument.jsGenerator));
+ // Disable lookups in non-standalone (aka QML) mode
+ isel->setUseFastLookups(false);
+ irDocument.javaScriptCompilationUnit = isel->compile(/*generate unit*/false);
+ // ###
+ QV4::CompiledData::ResolvedTypeReferenceMap dummyDependencies;
+ QV4::CompiledData::Unit *unit = generator.generate(irDocument, /*engine*/nullptr, dummyDependencies);
+ unit->flags |= QV4::CompiledData::Unit::StaticData;
+ unit->flags |= QV4::CompiledData::Unit::PendingTypeCompilation;
+ irDocument.javaScriptCompilationUnit->data = unit;
+
+ if (!irDocument.javaScriptCompilationUnit->saveToDisk(inputFileName, &error->message))
+ return false;
+
+ free(unit);
+ }
+ return true;
+}
+
+static bool compileJSFile(const QString &inputFileName, QV4::EvalISelFactory *iselFactory, Error *error)
+{
+ QmlIR::Document irDocument(/*debugMode*/false);
+
+ QString sourceCode;
+ {
+ QFile f(inputFileName);
+ if (!f.open(QIODevice::ReadOnly)) {
+ error->message = QLatin1String("Error opening ") + inputFileName + QLatin1Char(':') + f.errorString();
+ return false;
+ }
+ sourceCode = QString::fromUtf8(f.readAll());
+ if (f.error() != QFileDevice::NoError) {
+ error->message = QLatin1String("Error reading from ") + inputFileName + QLatin1Char(':') + f.errorString();
+ return false;
+ }
+ irDocument.jsModule.sourceTimeStamp = QFileInfo(f).lastModified().toMSecsSinceEpoch();
+ }
+
+ QQmlJS::Engine *engine = &irDocument.jsParserEngine;
+ QmlIR::ScriptDirectivesCollector directivesCollector(engine, &irDocument.jsGenerator);
+ QQmlJS::Directives *oldDirs = engine->directives();
+ engine->setDirectives(&directivesCollector);
+
+ QQmlJS::AST::Program *program = nullptr;
+
+ {
+ QQmlJS::Lexer lexer(engine);
+ lexer.setCode(sourceCode, /*line*/1, /*parseAsBinding*/false);
+ QQmlJS::Parser parser(engine);
+
+ bool parsed = parser.parseProgram();
+
+ for (const QQmlJS::DiagnosticMessage &parseError: parser.diagnosticMessages()) {
+ if (!error->message.isEmpty())
+ error->message += QLatin1Char('\n');
+ error->message += diagnosticErrorMessage(inputFileName, parseError);
+ }
+
+ if (!parsed) {
+ engine->setDirectives(oldDirs);
+ return false;
+ }
+
+ program = QQmlJS::AST::cast<QQmlJS::AST::Program*>(parser.rootNode());
+ if (!program) {
+ lexer.setCode(QStringLiteral("undefined;"), 1, false);
+ parsed = parser.parseProgram();
+ Q_ASSERT(parsed);
+ program = QQmlJS::AST::cast<QQmlJS::AST::Program*>(parser.rootNode());
+ Q_ASSERT(program);
+ }
+ }
+
+ {
+ QmlIR::JSCodeGen v4CodeGen(inputFileName, irDocument.code, &irDocument.jsModule, &irDocument.jsParserEngine, irDocument.program, /*import cache*/0, &irDocument.jsGenerator.stringTable);
+ v4CodeGen.generateFromProgram(inputFileName, sourceCode, program, &irDocument.jsModule, QQmlJS::Codegen::GlobalCode);
+ QList<QQmlJS::DiagnosticMessage> jsErrors = v4CodeGen.errors();
+ if (!jsErrors.isEmpty()) {
+ for (const QQmlJS::DiagnosticMessage &e: qAsConst(jsErrors)) {
+ if (!error->message.isEmpty())
+ error->message += QLatin1Char('\n');
+ error->message += diagnosticErrorMessage(inputFileName, e);
+ }
+ engine->setDirectives(oldDirs);
+ return false;
+ }
+
+ QmlIR::QmlUnitGenerator generator;
+
+ // ### translation binding simplification
+
+ QScopedPointer<QV4::EvalInstructionSelection> isel(iselFactory->create(/*engine*/nullptr, /*executable allocator*/nullptr, &irDocument.jsModule, &irDocument.jsGenerator));
+ // Disable lookups in non-standalone (aka QML) mode
+ isel->setUseFastLookups(false);
+ irDocument.javaScriptCompilationUnit = isel->compile(/*generate unit*/false);
+ // ###
+ QV4::CompiledData::ResolvedTypeReferenceMap dummyDependencies;
+ QV4::CompiledData::Unit *unit = generator.generate(irDocument, /*engine*/nullptr, dummyDependencies);
+ unit->flags |= QV4::CompiledData::Unit::StaticData;
+ irDocument.javaScriptCompilationUnit->data = unit;
+
+ if (!irDocument.javaScriptCompilationUnit->saveToDisk(inputFileName, &error->message)) {
+ engine->setDirectives(oldDirs);
+ return false;
+ }
+
+ free(unit);
+ }
+ engine->setDirectives(oldDirs);
+ return true;
+}
+
+int main(int argc, char **argv)
+{
+ // Reliably produce the same output for the same input by disabling QHash's random seeding.
+ qt_qhash_seed.testAndSetRelaxed(-1, 0);
+
+ QCoreApplication app(argc, argv);
+ QCoreApplication::setApplicationName(QStringLiteral("qmlcachegen"));
+ QCoreApplication::setApplicationVersion(QLatin1String(QT_VERSION_STR));
+
+ QCommandLineParser parser;
+ parser.addHelpOption();
+ parser.addVersionOption();
+
+ QCommandLineOption targetArchitectureOption(QStringLiteral("target-architecture"), QCoreApplication::translate("main", "Target architecture"), QCoreApplication::translate("main", "architecture"));
+ parser.addOption(targetArchitectureOption);
+
+ parser.addPositionalArgument(QStringLiteral("[qml file]"),
+ QStringLiteral("QML source file to generate cache for."));
+
+ parser.process(app);
+
+ const QStringList sources = parser.positionalArguments();
+ if (sources.count() > 1) {
+ fprintf(stderr, "%s\n", qPrintable(QStringLiteral("Too many input files specified: '") + sources.join(QStringLiteral("' '")) + QLatin1Char('\'')));
+ return EXIT_FAILURE;
+ }
+ const QString inputFile = sources.first();
+
+ QScopedPointer<QV4::EvalISelFactory> isel;
+ const QString targetArchitecture = parser.value(targetArchitectureOption);
+
+ isel.reset(QV4::JIT::createISelForArchitecture(targetArchitecture));
+
+ if (!isel)
+ isel.reset(new QV4::Moth::ISelFactory);
+
+ Error error;
+
+ if (inputFile.endsWith(QLatin1String(".qml"))) {
+ if (!compileQmlFile(inputFile, isel.data(), &error)) {
+ error.augment(QLatin1String("Error compiling qml file: ")).print();
+ return EXIT_FAILURE;
+ }
+ } else if (inputFile.endsWith(QLatin1String(".js"))) {
+ if (!compileJSFile(inputFile, isel.data(), &error)) {
+ error.augment(QLatin1String("Error compiling js file: ")).print();
+ return EXIT_FAILURE;
+ }
+ } else {
+ fprintf(stderr, "Ignoring %s input file as it is not QML source code - maybe remove from QML_FILES?\n", qPrintable(inputFile));
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/tools/qmlcachegen/qmlcachegen.pro b/tools/qmlcachegen/qmlcachegen.pro
new file mode 100644
index 0000000000..81783d0396
--- /dev/null
+++ b/tools/qmlcachegen/qmlcachegen.pro
@@ -0,0 +1,24 @@
+option(host_build)
+
+QT = qmldevtools-private
+DEFINES += QT_NO_CAST_TO_ASCII QT_NO_CAST_FROM_ASCII
+
+SOURCES = qmlcachegen.cpp
+TARGET = qmlcachegen
+
+BUILD_INTEGRATION = qmlcache.prf
+!force_independent {
+ qmake_integration.input = BUILD_INTEGRATION
+ qmake_integration.output = $$[QT_HOST_DATA]/mkspecs/features/${QMAKE_FILE_BASE}.prf
+ qmake_integration.commands = $$QMAKE_COPY ${QMAKE_FILE_IN} ${QMAKE_FILE_OUT}
+ qmake_integration.name = COPY ${QMAKE_FILE_IN} ${QMAKE_FILE_OUT}
+ qmake_integration.CONFIG = no_clean no_link
+ !contains(TEMPLATE, vc.*): qmake_integration.variable_out = GENERATED_FILES
+ QMAKE_EXTRA_COMPILERS += qmake_integration
+}
+
+qmake_integration_installs.files = $$BUILD_INTEGRATION
+qmake_integration_installs.path = $$[QT_HOST_DATA]/mkspecs/features
+INSTALLS += qmake_integration_installs
+
+load(qt_tool)
diff --git a/tools/qmljs/qmljs.cpp b/tools/qmljs/qmljs.cpp
index 54e1b6cea8..4d0f9d278f 100644
--- a/tools/qmljs/qmljs.cpp
+++ b/tools/qmljs/qmljs.cpp
@@ -141,7 +141,7 @@ int main(int argc, char *argv[])
#endif
#ifdef V4_ENABLE_JIT
} else {
- iSelFactory = new QV4::JIT::ISelFactory;
+ iSelFactory = new QV4::JIT::ISelFactory<>;
#endif // V4_ENABLE_JIT
}
diff --git a/tools/tools.pro b/tools/tools.pro
index 3952ec4b01..f3988a909a 100644
--- a/tools/tools.pro
+++ b/tools/tools.pro
@@ -4,9 +4,6 @@ SUBDIRS += \
qmlmin \
qmlimportscanner
-qmlmin.CONFIG = host_build
-qmlimportscanner.CONFIG = host_build
-
!android|android_app {
SUBDIRS += \
qml \