diff options
42 files changed, 767 insertions, 340 deletions
diff --git a/src/3rdparty/masm/assembler/ARM64Assembler.h b/src/3rdparty/masm/assembler/ARM64Assembler.h index ad5acdbb85..7390997af1 100644 --- a/src/3rdparty/masm/assembler/ARM64Assembler.h +++ b/src/3rdparty/masm/assembler/ARM64Assembler.h @@ -26,9 +26,10 @@ #ifndef ARM64Assembler_h #define ARM64Assembler_h -#if ENABLE(ASSEMBLER) && CPU(ARM64) +#if ENABLE(ASSEMBLER) && (CPU(ARM64) || defined(V4_BOOTSTRAP)) #include "AssemblerBuffer.h" +#include "AbstractMacroAssembler.h" #include <limits.h> #include <wtf/Assertions.h> #include <wtf/Vector.h> @@ -520,8 +521,8 @@ typedef enum { #undef DECLARE_REGISTER } FPRegisterID; -static constexpr bool isSp(RegisterID reg) { return reg == sp; } -static constexpr bool isZr(RegisterID reg) { return reg == zr; } +static Q_DECL_CONSTEXPR bool isSp(RegisterID reg) { return reg == sp; } +static Q_DECL_CONSTEXPR bool isZr(RegisterID reg) { return reg == zr; } } // namespace ARM64Registers @@ -530,15 +531,15 @@ public: typedef ARM64Registers::RegisterID RegisterID; typedef ARM64Registers::FPRegisterID FPRegisterID; - static constexpr RegisterID firstRegister() { return ARM64Registers::x0; } - static constexpr RegisterID lastRegister() { return ARM64Registers::sp; } + static Q_DECL_CONSTEXPR RegisterID firstRegister() { return ARM64Registers::x0; } + static Q_DECL_CONSTEXPR RegisterID lastRegister() { return ARM64Registers::sp; } - static constexpr FPRegisterID firstFPRegister() { return ARM64Registers::q0; } - static constexpr FPRegisterID lastFPRegister() { return ARM64Registers::q31; } + static Q_DECL_CONSTEXPR FPRegisterID firstFPRegister() { return ARM64Registers::q0; } + static Q_DECL_CONSTEXPR FPRegisterID lastFPRegister() { return ARM64Registers::q31; } private: - static constexpr bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); } - static constexpr bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); } + static Q_DECL_CONSTEXPR bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); } + static 
Q_DECL_CONSTEXPR bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); } public: ARM64Assembler() @@ -546,7 +547,7 @@ public: , m_indexOfTailOfLastWatchpoint(INT_MIN) { } - + AssemblerBuffer& buffer() { return m_buffer; } // (HS, LO, HI, LS) -> (AE, B, A, BE) @@ -653,9 +654,7 @@ public: } void operator=(const LinkRecord& other) { - data.copyTypes.content[0] = other.data.copyTypes.content[0]; - data.copyTypes.content[1] = other.data.copyTypes.content[1]; - data.copyTypes.content[2] = other.data.copyTypes.content[2]; + data.realTypes = other.data.realTypes; } intptr_t from() const { return data.realTypes.m_from; } void setFrom(intptr_t from) { data.realTypes.m_from = from; } @@ -671,8 +670,8 @@ public: private: union { struct RealTypes { - intptr_t m_from : 48; - intptr_t m_to : 48; + int64_t m_from : 48; + int64_t m_to : 48; JumpType m_type : 8; JumpLinkType m_linkType : 8; Condition m_condition : 4; @@ -680,10 +679,6 @@ public: RegisterID m_compareRegister : 6; bool m_is64Bit : 1; } realTypes; - struct CopyTypes { - uint64_t content[3]; - } copyTypes; - COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct); } data; }; @@ -743,6 +738,89 @@ public: return isValidSignedImm9(offset); } + + // Jump: + // + // A jump object is a reference to a jump instruction that has been planted + // into the code buffer - it is typically used to link the jump, setting the + // relative offset such that when executed it will jump to the desired + // destination. 
+ template <typename LabelType> + class Jump { + template<class TemplateAssemblerType> + friend class AbstractMacroAssembler; + friend class Call; + template <typename, template <typename> class> friend class LinkBufferBase; + public: + Jump() + { + } + + Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid) + : m_label(jmp) + , m_type(type) + , m_condition(condition) + { + } + + Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister) + : m_label(jmp) + , m_type(type) + , m_condition(condition) + , m_is64Bit(is64Bit) + , m_compareRegister(compareRegister) + { + ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize)); + } + + Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister) + : m_label(jmp) + , m_type(type) + , m_condition(condition) + , m_bitNumber(bitNumber) + , m_compareRegister(compareRegister) + { + ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize)); + } + + LabelType label() const + { + LabelType result; + result.m_label = m_label; + return result; + } + + void link(AbstractMacroAssembler<ARM64Assembler>* masm) const + { + if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize)) + masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister); + else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize)) + masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister); + else + masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, 
m_condition); + } + + void linkTo(LabelType label, AbstractMacroAssembler<ARM64Assembler>* masm) const + { + if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize)) + masm->m_assembler.linkJump(m_label, label.label(), m_type, m_condition, m_is64Bit, m_compareRegister); + else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize)) + masm->m_assembler.linkJump(m_label, label.label(), m_type, m_condition, m_bitNumber, m_compareRegister); + else + masm->m_assembler.linkJump(m_label, label.label(), m_type, m_condition); + } + + bool isSet() const { return m_label.isSet(); } + + private: + AssemblerLabel m_label; + ARM64Assembler::JumpType m_type; + ARM64Assembler::Condition m_condition; + bool m_is64Bit; + unsigned m_bitNumber; + ARM64Assembler::RegisterID m_compareRegister; + }; + private: int encodeFPImm(double d) { @@ -2857,11 +2935,11 @@ public: expected = disassembleMoveWideImediate(address + 1, sf, opc, hw, imm16, rd); ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 1 && rd == rdFirst); - result |= static_cast<uintptr_t>(imm16) << 16; + result |= static_cast<uint64_t>(imm16) << 16; expected = disassembleMoveWideImediate(address + 2, sf, opc, hw, imm16, rd); ASSERT_UNUSED(expected, expected && sf && opc == MoveWideOp_K && hw == 2 && rd == rdFirst); - result |= static_cast<uintptr_t>(imm16) << 32; + result |= static_cast<uint64_t>(imm16) << 32; return reinterpret_cast<void*>(result); } @@ -2932,7 +3010,10 @@ public: static void cacheFlush(void* code, size_t size) { -#if OS(IOS) +#if defined(V4_BOOTSTRAP) + UNUSED_PARAM(code) + UNUSED_PARAM(size) +#elif OS(IOS) sys_cache_control(kCacheFunctionPrepareForExecution, code, size); #elif OS(LINUX) size_t page = pageSize(); @@ -2989,7 +3070,7 @@ public: case JumpCondition: { ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3)); ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3)); - intptr_t relative = 
reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from)); + int64_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from)); if (((relative << 43) >> 43) == relative) return LinkJumpConditionDirect; @@ -2999,7 +3080,7 @@ public: case JumpCompareAndBranch: { ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3)); ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3)); - intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from)); + int64_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from)); if (((relative << 43) >> 43) == relative) return LinkJumpCompareAndBranchDirect; @@ -3009,7 +3090,7 @@ public: case JumpTestBit: { ASSERT(!(reinterpret_cast<intptr_t>(from) & 0x3)); ASSERT(!(reinterpret_cast<intptr_t>(to) & 0x3)); - intptr_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from)); + int64_t relative = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(from)); if (((relative << 50) >> 50) == relative) return LinkJumpTestBitDirect; @@ -3121,7 +3202,7 @@ private: { ASSERT(!(reinterpret_cast<intptr_t>(from) & 3)); ASSERT(!(reinterpret_cast<intptr_t>(to) & 3)); - intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; + int64_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; ASSERT(((offset << 38) >> 38) == offset); bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits @@ -3142,7 +3223,7 @@ private: { ASSERT(!(reinterpret_cast<intptr_t>(from) & 3)); ASSERT(!(reinterpret_cast<intptr_t>(to) & 3)); - intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; + int64_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; ASSERT(((offset << 38) >> 38) == offset); bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits @@ -3163,7 +3244,7 @@ private: { ASSERT(!(reinterpret_cast<intptr_t>(from) & 3)); 
ASSERT(!(reinterpret_cast<intptr_t>(to) & 3)); - intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; + int64_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; ASSERT(static_cast<int>(offset) == offset); ASSERT(((offset << 38) >> 38) == offset); @@ -3766,6 +3847,8 @@ private: #undef DATASIZE #undef MEMOPSIZE #undef CHECK_FP_MEMOP_DATASIZE +#undef JUMP_ENUM_WITH_SIZE +#undef JUMP_ENUM_SIZE #endif // ENABLE(ASSEMBLER) && CPU(ARM64) diff --git a/src/3rdparty/masm/assembler/ARMv7Assembler.h b/src/3rdparty/masm/assembler/ARMv7Assembler.h index 6b32fbf487..615c72fc15 100644 --- a/src/3rdparty/masm/assembler/ARMv7Assembler.h +++ b/src/3rdparty/masm/assembler/ARMv7Assembler.h @@ -2867,6 +2867,9 @@ private: int m_indexOfTailOfLastWatchpoint; }; +#undef JUMP_ENUM_WITH_SIZE +#undef JUMP_ENUM_SIZE + } // namespace JSC #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) diff --git a/src/3rdparty/masm/assembler/AbstractMacroAssembler.h b/src/3rdparty/masm/assembler/AbstractMacroAssembler.h index 4f27e85c98..6fac27fdf1 100644 --- a/src/3rdparty/masm/assembler/AbstractMacroAssembler.h +++ b/src/3rdparty/masm/assembler/AbstractMacroAssembler.h @@ -66,7 +66,7 @@ public: typedef MacroAssemblerCodePtr CodePtr; typedef MacroAssemblerCodeRef CodeRef; -#if !CPU(ARM_THUMB2) && !defined(V4_BOOTSTRAP) +#if !CPU(ARM_THUMB2) && !CPU(ARM64) && !defined(V4_BOOTSTRAP) class Jump; #endif @@ -455,7 +455,7 @@ public: AssemblerLabel m_label; }; -#if CPU(ARM_THUMB2) || defined(V4_BOOTSTRAP) +#if CPU(ARM_THUMB2) || CPU(ARM64) || defined(V4_BOOTSTRAP) using Jump = typename AssemblerType::template Jump<Label>; friend Jump; #endif @@ -510,7 +510,7 @@ public: // into the code buffer - it is typically used to link the jump, setting the // relative offset such that when executed it will jump to the desired // destination. 
-#if !CPU(ARM_THUMB2) && !defined(V4_BOOTSTRAP) +#if !CPU(ARM_THUMB2) && !CPU(ARM64) && !defined(V4_BOOTSTRAP) class Jump { template<class TemplateAssemblerType> friend class AbstractMacroAssembler; diff --git a/src/3rdparty/masm/assembler/LinkBuffer.h b/src/3rdparty/masm/assembler/LinkBuffer.h index 3a659a23ce..bfd0e402ca 100644 --- a/src/3rdparty/masm/assembler/LinkBuffer.h +++ b/src/3rdparty/masm/assembler/LinkBuffer.h @@ -505,7 +505,7 @@ public: }; #endif -#if CPU(ARM64) +#if CPU(ARM64) || defined(V4_BOOTSTRAP) template <> class LinkBuffer<JSC::MacroAssembler<MacroAssemblerARM64>> : public BranchCompactingLinkBuffer<JSC::MacroAssembler<MacroAssemblerARM64>> { diff --git a/src/3rdparty/masm/assembler/MacroAssembler.h b/src/3rdparty/masm/assembler/MacroAssembler.h index 87794c8ef4..7d9f156c8c 100644 --- a/src/3rdparty/masm/assembler/MacroAssembler.h +++ b/src/3rdparty/masm/assembler/MacroAssembler.h @@ -31,12 +31,12 @@ #if ENABLE(ASSEMBLER) #include "MacroAssemblerARMv7.h" +#include "MacroAssemblerARM64.h" #if CPU(ARM_THUMB2) namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }; #elif CPU(ARM64) -#include "MacroAssemblerARM64.h" namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; }; #elif CPU(ARM_TRADITIONAL) diff --git a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h index 3e425a0246..a11637f7ca 100644 --- a/src/3rdparty/masm/assembler/MacroAssemblerARM64.h +++ b/src/3rdparty/masm/assembler/MacroAssemblerARM64.h @@ -26,7 +26,7 @@ #ifndef MacroAssemblerARM64_h #define MacroAssemblerARM64_h -#if ENABLE(ASSEMBLER) +#if ENABLE(ASSEMBLER) && (CPU(ARM64) || defined(V4_BOOTSTRAP)) #include "ARM64Assembler.h" #include "AbstractMacroAssembler.h" @@ -42,7 +42,7 @@ class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> { friend class DataLabelPtr; friend class DataLabel32; friend class DataLabelCompact; - friend class Jump; +// template <typename> friend class Jump; friend 
class Label; public: @@ -119,9 +119,9 @@ public: private: static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31; static const ARM64Assembler::SetFlags S = ARM64Assembler::S; - static const intptr_t maskHalfWord0 = 0xffffl; - static const intptr_t maskHalfWord1 = 0xffff0000l; - static const intptr_t maskUpperWord = 0xffffffff00000000l; + static const int64_t maskHalfWord0 = 0xffffl; + static const int64_t maskHalfWord1 = 0xffff0000l; + static const int64_t maskUpperWord = 0xffffffff00000000l; // 4 instructions - 3 to load the function pointer, + blr. static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16; @@ -209,6 +209,33 @@ public: static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; } static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; } +#if defined(V4_BOOTSTRAP) + void loadPtr(ImplicitAddress address, RegisterID dest) + { + load32(address, dest); + } + + void subPtr(TrustedImm32 imm, RegisterID dest) + { + sub32(imm, dest); + } + + void addPtr(TrustedImm32 imm, RegisterID dest) + { + add32(imm, dest); + } + + void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + add32(imm, src, dest); + } + + void storePtr(RegisterID src, ImplicitAddress address) + { + store32(src, address); + } +#endif + // Integer operations: void add32(RegisterID a, RegisterID b, RegisterID dest) @@ -2757,6 +2784,7 @@ public: return branch32(cond, left, dataTempRegister); } +#if !defined(V4_BOOTSTRAP) PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right) { m_makeJumpPatchable = true; @@ -2764,6 +2792,7 @@ public: m_makeJumpPatchable = false; return PatchableJump(result); } +#endif PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) { @@ -3000,7 +3029,7 @@ private: return m_cachedMemoryTempRegister.registerIDInvalidate(); } - ALWAYS_INLINE bool isInIntRange(intptr_t value) + 
ALWAYS_INLINE bool isInIntRange(int64_t value) { return value == ((value << 32) >> 32); } @@ -3354,6 +3383,8 @@ private: } template <typename, template <typename> class> friend class LinkBufferBase; + template <typename> friend class BranchCompactingLinkBuffer; + template <typename> friend struct BranchCompactingExecutableOffsetCalculator; void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); } int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); } diff --git a/src/3rdparty/masm/masm.pri b/src/3rdparty/masm/masm.pri index c63cd5da66..6c301fea38 100644 --- a/src/3rdparty/masm/masm.pri +++ b/src/3rdparty/masm/masm.pri @@ -31,7 +31,17 @@ HEADERS += $$PWD/stubs/WTFStubs.h SOURCES += $$PWD/stubs/Options.cpp -HEADERS += $$PWD/stubs/wtf/*.h +HEADERS += $$PWD/stubs/wtf/FastAllocBase.h \ + $$PWD/stubs/wtf/FastMalloc.h \ + $$PWD/stubs/wtf/Noncopyable.h \ + $$PWD/stubs/wtf/OwnPtr.h \ + $$PWD/stubs/wtf/PassOwnPtr.h \ + $$PWD/stubs/wtf/PassRefPtr.h \ + $$PWD/stubs/wtf/RefCounted.h \ + $$PWD/stubs/wtf/RefPtr.h \ + $$PWD/stubs/wtf/TypeTraits.h \ + $$PWD/stubs/wtf/UnusedParam.h \ + $$PWD/stubs/wtf/Vector.h SOURCES += $$PWD/disassembler/Disassembler.cpp SOURCES += $$PWD/disassembler/UDis86Disassembler.cpp @@ -67,8 +77,21 @@ SOURCES += $$PWD/disassembler/ARM64Disassembler.cpp SOURCES += $$PWD/disassembler/ARM64/A64DOpcode.cpp HEADERS += $$PWD/disassembler/ARM64/A64DOpcode.h -SOURCES += $$PWD/yarr/*.cpp -HEADERS += $$PWD/yarr/*.h +!qmldevtools_build { +SOURCES += $$PWD/yarr/YarrCanonicalizeUCS2.cpp \ + $$PWD/yarr/YarrInterpreter.cpp \ + $$PWD/yarr/YarrJIT.cpp \ + $$PWD/yarr/YarrPattern.cpp \ + $$PWD/yarr/YarrSyntaxChecker.cpp + +HEADERS += $$PWD/yarr/Yarr.h \ + $$PWD/yarr/YarrCanonicalizeUCS2.h \ + $$PWD/yarr/YarrInterpreter.h \ + $$PWD/yarr/YarrJIT.h \ + $$PWD/yarr/YarrParser.h \ + $$PWD/yarr/YarrPattern.h \ + $$PWD/yarr/YarrSyntaxChecker.h +} # # 
Generate RegExpJitTables.h diff --git a/src/3rdparty/masm/stubs/ExecutableAllocator.h b/src/3rdparty/masm/stubs/ExecutableAllocator.h index 9a2a9773b5..3b84b5c986 100644 --- a/src/3rdparty/masm/stubs/ExecutableAllocator.h +++ b/src/3rdparty/masm/stubs/ExecutableAllocator.h @@ -107,7 +107,7 @@ struct ExecutableAllocator { size = size + (iaddr - roundAddr); addr = reinterpret_cast<void*>(roundAddr); -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) +#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) && !defined(V4_BOOTSTRAP) # if OS(WINDOWS) DWORD oldProtect; # if !OS(WINRT) @@ -140,6 +140,7 @@ struct ExecutableAllocator { size = size + (iaddr - roundAddr); addr = reinterpret_cast<void*>(roundAddr); +#if !defined(V4_BOOTSTRAP) #if ENABLE(ASSEMBLER_WX_EXCLUSIVE) # if OS(WINDOWS) DWORD oldProtect; @@ -161,6 +162,10 @@ struct ExecutableAllocator { #else # error "Only W^X is supported" #endif +#else + (void)addr; // suppress unused parameter warning + (void)size; // suppress unused parameter warning +#endif } QV4::ExecutableAllocator *realAllocator; diff --git a/src/imports/localstorage/plugin.cpp b/src/imports/localstorage/plugin.cpp index d3ea93c80a..60b8dad5fb 100644 --- a/src/imports/localstorage/plugin.cpp +++ b/src/imports/localstorage/plugin.cpp @@ -219,24 +219,6 @@ QQmlSqlDatabaseData::~QQmlSqlDatabaseData() { } -static QString qmlsqldatabase_databasesPath(QV4::ExecutionEngine *engine) -{ - return engine->qmlEngine()->offlineStoragePath() + - QDir::separator() + QLatin1String("Databases"); -} - -static void qmlsqldatabase_initDatabasesPath(QV4::ExecutionEngine *engine) -{ - QString databasesPath = qmlsqldatabase_databasesPath(engine); - if (!QDir().mkpath(databasesPath)) - qWarning() << "LocalStorage: can't create path - " << databasesPath; -} - -static QString qmlsqldatabase_databaseFile(const QString& connectionName, QV4::ExecutionEngine *engine) -{ - return qmlsqldatabase_databasesPath(engine) + QDir::separator() + connectionName; -} - static ReturnedValue qmlsqldatabase_rows_index(const 
QQmlSqlDatabaseWrapper *r, ExecutionEngine *v4, quint32 index, bool *hasProperty = 0) { Scope scope(v4); @@ -450,7 +432,8 @@ static ReturnedValue qmlsqldatabase_changeVersion(CallContext *ctx) if (ok) { *w->d()->version = to_version; #if QT_CONFIG(settings) - QSettings ini(qmlsqldatabase_databaseFile(db.connectionName(), scope.engine) + QLatin1String(".ini"), QSettings::IniFormat); + const QQmlEnginePrivate *enginePrivate = QQmlEnginePrivate::get(scope.engine->qmlEngine()); + QSettings ini(enginePrivate->offlineStorageDatabaseDirectory() + db.connectionName() + QLatin1String(".ini"), QSettings::IniFormat); ini.setValue(QLatin1String("Version"), to_version); #endif } @@ -723,24 +706,23 @@ void QQuickLocalStorage::openDatabaseSync(QQmlV4Function *args) if (scope.engine->qmlEngine()->offlineStoragePath().isEmpty()) V4THROW_SQL2(SQLEXCEPTION_DATABASE_ERR, QQmlEngine::tr("SQL: can't create database, offline storage is disabled.")); - qmlsqldatabase_initDatabasesPath(scope.engine); - - QSqlDatabase database; - QV4::ScopedValue v(scope); QString dbname = (v = (*args)[0])->toQStringNoThrow(); QString dbversion = (v = (*args)[1])->toQStringNoThrow(); QString dbdescription = (v = (*args)[2])->toQStringNoThrow(); int dbestimatedsize = (v = (*args)[3])->toInt32(); FunctionObject *dbcreationCallback = (v = (*args)[4])->as<FunctionObject>(); - - QCryptographicHash md5(QCryptographicHash::Md5); - md5.addData(dbname.toUtf8()); - QString dbid(QLatin1String(md5.result().toHex())); - - QString basename = qmlsqldatabase_databaseFile(dbid, scope.engine); + QString basename = args->v4engine()->qmlEngine()->offlineStorageDatabaseFilePath(dbname); + QFileInfo dbFile(basename); + if (!QDir().mkpath(dbFile.dir().absolutePath())) { + const QString message = QQmlEngine::tr("LocalStorage: can't create path %1"). 
+ arg(QDir::toNativeSeparators(dbFile.dir().absolutePath())); + V4THROW_SQL2(SQLEXCEPTION_DATABASE_ERR, message); + } + QString dbid = dbFile.fileName(); bool created = false; QString version = dbversion; + QSqlDatabase database; { QSettings ini(basename+QLatin1String(".ini"),QSettings::IniFormat); diff --git a/src/qml/doc/src/cppintegration/data.qdoc b/src/qml/doc/src/cppintegration/data.qdoc index ac6600f38c..4523ee39d8 100644 --- a/src/qml/doc/src/cppintegration/data.qdoc +++ b/src/qml/doc/src/cppintegration/data.qdoc @@ -167,9 +167,9 @@ additional features. See the \l {qtqml-javascript-hostenvironment.html} The QML engine provides automatic type conversion between QVariantList and JavaScript arrays, and between QVariantMap and JavaScript objects. -For example, the function defined in QML below left expects two arguments, an +For example, the function defined in QML below expects two arguments, an array and an object, and prints their contents using the standard JavaScript -syntax for array and object item access. The C++ code below right calls this +syntax for array and object item access. The C++ code below calls this function, passing a QVariantList and a QVariantMap, which are automatically converted to JavaScript array and object values, repectively: @@ -204,9 +204,9 @@ when it is passed to C++. The QML engine provides automatic type conversion between QDateTime values and JavaScript \c Date objects. -For example, the function defined in QML below left expects a JavaScript +For example, the function defined in QML below expects a JavaScript \c Date object, and also returns a new \c Date object with the current date and -time. The C++ code below right calls this function, passing a QDateTime value +time. The C++ code below calls this function, passing a QDateTime value that is automatically converted by the engine into a \c Date object when it is passed to the \c readDate() function. 
In turn, the readDate() function returns a \c Date object that is automatically converted into a QDateTime value when it @@ -215,7 +215,7 @@ is received in C++: \table \header \row - +\li QML \li \qml // MyItem.qml @@ -227,6 +227,7 @@ Item { } \endqml \row +\li C++ \li \code // C++ diff --git a/src/qml/doc/src/cppintegration/exposecppattributes.qdoc b/src/qml/doc/src/cppintegration/exposecppattributes.qdoc index ed0d049564..3bffd2eb6f 100644 --- a/src/qml/doc/src/cppintegration/exposecppattributes.qdoc +++ b/src/qml/doc/src/cppintegration/exposecppattributes.qdoc @@ -390,8 +390,8 @@ that is a public slot: \endcode If an instance of \c MessageBoard was set as the context data for a file \c -MyItem.qml, as shown below left, then \c MyItem.qml could invoke the two -methods, as shown below right: +MyItem.qml, then \c MyItem.qml could invoke the two methods as shown in the +examples below: \table \row diff --git a/src/qml/jit/qv4assembler.cpp b/src/qml/jit/qv4assembler.cpp index 646d9a8871..a2cb56abbe 100644 --- a/src/qml/jit/qv4assembler.cpp +++ b/src/qml/jit/qv4assembler.cpp @@ -379,16 +379,7 @@ void Assembler<TargetConfiguration>::leaveStandardStackFrame(const RegisterInfor Q_ASSERT(slotAddr.offset == 0); const int frameSize = _stackLayout->calculateStackFrameSize(); - // Work around bug in ARMv7Assembler.h where add32(imm, sp, sp) doesn't - // work well for large immediates. 
-#if CPU(ARM_THUMB2) - move(TrustedImm32(frameSize), JSC::ARMRegisters::r3); - add32(JSC::ARMRegisters::r3, StackPointerRegister); -#else - addPtr(TrustedImm32(frameSize), StackPointerRegister); -#endif - - platformLeaveStandardStackFrame(this); + platformLeaveStandardStackFrame(this, frameSize); } @@ -709,8 +700,13 @@ JSC::MacroAssemblerCodeRef Assembler<TargetConfiguration>::link(int *codeSize) } template class QV4::JIT::Assembler<DefaultAssemblerTargetConfiguration>; -#if defined(V4_BOOTSTRAP) && CPU(X86_64) +#if defined(V4_BOOTSTRAP) +#if !CPU(ARM_THUMB2) template class QV4::JIT::Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerARMv7, NoOperatingSystemSpecialization>>; #endif +#if !CPU(ARM64) +template class QV4::JIT::Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerARM64, NoOperatingSystemSpecialization>>; +#endif +#endif #endif diff --git a/src/qml/jit/qv4assembler_p.h b/src/qml/jit/qv4assembler_p.h index 6d8d773ff0..fd65c9b3d2 100644 --- a/src/qml/jit/qv4assembler_p.h +++ b/src/qml/jit/qv4assembler_p.h @@ -145,11 +145,13 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo using RegisterID = typename JITAssembler::RegisterID; using FPRegisterID = typename JITAssembler::FPRegisterID; using RelationalCondition = typename JITAssembler::RelationalCondition; + using ResultCondition = typename JITAssembler::ResultCondition; using Address = typename JITAssembler::Address; using Pointer = typename JITAssembler::Pointer; using TrustedImm32 = typename JITAssembler::TrustedImm32; using TrustedImm64 = typename JITAssembler::TrustedImm64; using Jump = typename JITAssembler::Jump; + using Label = typename JITAssembler::Label; static void loadDouble(JITAssembler *as, Address addr, FPRegisterID dest) { @@ -352,6 +354,19 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo return as->branch32(RelationalCondition::NotEqual, tagOrValueRegister, TrustedImm32(Value::NotDouble_Mask)); } + + static 
void initializeLocalVariables(JITAssembler *as, int localsCount) + { + as->move(TrustedImm32(0), TargetPlatform::ReturnValueRegister); + as->move(TrustedImm32(localsCount), TargetPlatform::ScratchRegister); + Label loop = as->label(); + as->store32(TargetPlatform::ReturnValueRegister, Address(TargetPlatform::LocalsRegister)); + as->add32(TrustedImm32(4), TargetPlatform::LocalsRegister); + as->store32(TargetPlatform::ReturnValueRegister, Address(TargetPlatform::LocalsRegister)); + as->add32(TrustedImm32(4), TargetPlatform::LocalsRegister); + Jump jump = as->branchSub32(ResultCondition::NonZero, TrustedImm32(1), TargetPlatform::ScratchRegister); + jump.linkTo(loop, as); + } }; template <typename JITAssembler, typename MacroAssembler, typename TargetPlatform> @@ -364,8 +379,10 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo using TrustedImm64 = typename JITAssembler::TrustedImm64; using Pointer = typename JITAssembler::Pointer; using RelationalCondition = typename JITAssembler::RelationalCondition; + using ResultCondition = typename JITAssembler::ResultCondition; using BranchTruncateType = typename JITAssembler::BranchTruncateType; using Jump = typename JITAssembler::Jump; + using Label = typename JITAssembler::Label; static void loadDouble(JITAssembler *as, Address addr, FPRegisterID dest) { @@ -464,7 +481,7 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo { // Use ReturnValueRegister as "scratch" register because loadArgument // and storeArgument are functions that may need a scratch register themselves. 
- as->loadArgumentInRegister(source, TargetPlatform::ReturnValueRegister, 0); + loadArgumentInRegister(as, source, TargetPlatform::ReturnValueRegister, 0); as->storeReturnValue(destination); } @@ -477,6 +494,12 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo as->move64ToDouble(TargetPlatform::ReturnValueRegister, target); } + static void loadArgumentInRegister(JITAssembler *as, Address addressOfValue, RegisterID dest, int argumentNumber) + { + Q_UNUSED(argumentNumber); + as->load64(addressOfValue, dest); + } + static void loadArgumentInRegister(JITAssembler *as, IR::Temp* temp, RegisterID dest, int argumentNumber) { Q_UNUSED(argumentNumber); @@ -625,6 +648,17 @@ struct RegisterSizeDependentAssembler<JITAssembler, MacroAssembler, TargetPlatfo return as->branch32(RelationalCondition::NotEqual, tagOrValueRegister, TrustedImm32(0)); } + + static void initializeLocalVariables(JITAssembler *as, int localsCount) + { + as->move(TrustedImm64(0), TargetPlatform::ReturnValueRegister); + as->move(TrustedImm32(localsCount), TargetPlatform::ScratchRegister); + Label loop = as->label(); + as->store64(TargetPlatform::ReturnValueRegister, Address(TargetPlatform::LocalsRegister)); + as->add64(TrustedImm32(8), TargetPlatform::LocalsRegister); + Jump jump = as->branchSub32(ResultCondition::NonZero, TrustedImm32(1), TargetPlatform::ScratchRegister); + jump.linkTo(loop, as); + } }; template <typename TargetConfiguration> @@ -1338,9 +1372,8 @@ public: if (prepareCall(function)) loadArgumentOnStackOrRegister<0>(arg1); -#ifdef RESTORE_EBX_ON_CALL - load32(this->ebxAddressOnStack(), JSC::X86Registers::ebx); // restore the GOT ptr -#endif + if (JITTargetPlatform::gotRegister != -1) + load32(Address(JITTargetPlatform::FramePointerRegister, JITTargetPlatform::savedGOTRegisterSlotOnStack()), static_cast<RegisterID>(JITTargetPlatform::gotRegister)); // restore the GOT ptr callAbsolute(functionName, function); @@ -1585,6 +1618,15 @@ public: void 
setStackLayout(int maxArgCountForBuiltins, int regularRegistersToSave, int fpRegistersToSave); const StackLayout &stackLayout() const { return *_stackLayout.data(); } + void initializeLocalVariables() + { + const int locals = _stackLayout->calculateJSStackFrameSize(); + if (locals <= 0) + return; + loadPtr(Address(JITTargetPlatform::EngineRegister, qOffsetOf(ExecutionEngine, jsStackTop)), JITTargetPlatform::LocalsRegister); + RegisterSizeDependentOps::initializeLocalVariables(this, locals); + storePtr(JITTargetPlatform::LocalsRegister, Address(JITTargetPlatform::EngineRegister, qOffsetOf(ExecutionEngine, jsStackTop))); + } Label exceptionReturnLabel; IR::BasicBlock * catchBlock; diff --git a/src/qml/jit/qv4binop.cpp b/src/qml/jit/qv4binop.cpp index 8468bf65a6..6d20a4e042 100644 --- a/src/qml/jit/qv4binop.cpp +++ b/src/qml/jit/qv4binop.cpp @@ -41,8 +41,128 @@ #if ENABLE(ASSEMBLER) -using namespace QV4; -using namespace JIT; +QT_BEGIN_NAMESPACE + +namespace QV4 { +namespace JIT { + +template <typename JITAssembler> +struct ArchitectureSpecificBinaryOperation +{ + using FPRegisterID = typename JITAssembler::FPRegisterID; + + static bool doubleAdd(JITAssembler *as, IR::Expr *lhs, IR::Expr *rhs, FPRegisterID targetReg) + { + Q_UNUSED(as); + Q_UNUSED(lhs); + Q_UNUSED(rhs); + Q_UNUSED(targetReg); + return false; + } + static bool doubleMul(JITAssembler *as, IR::Expr *lhs, IR::Expr *rhs, FPRegisterID targetReg) + { + Q_UNUSED(as); + Q_UNUSED(lhs); + Q_UNUSED(rhs); + Q_UNUSED(targetReg); + return false; + } + static bool doubleSub(JITAssembler *as, IR::Expr *lhs, IR::Expr *rhs, FPRegisterID targetReg) + { + Q_UNUSED(as); + Q_UNUSED(lhs); + Q_UNUSED(rhs); + Q_UNUSED(targetReg); + return false; + } + static bool doubleDiv(JITAssembler *as, IR::Expr *lhs, IR::Expr *rhs, FPRegisterID targetReg) + { + Q_UNUSED(as); + Q_UNUSED(lhs); + Q_UNUSED(rhs); + Q_UNUSED(targetReg); + return false; + } +}; + +#if CPU(X86) +template <> +struct 
ArchitectureSpecificBinaryOperation<Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerX86, NoOperatingSystemSpecialization>>> +{ + using JITAssembler = Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerX86, NoOperatingSystemSpecialization>>; + using FPRegisterID = JITAssembler::FPRegisterID; + using Address = JITAssembler::Address; + + static bool doubleAdd(JITAssembler *as, IR::Expr *lhs, IR::Expr *rhs, FPRegisterID targetReg) + { + if (IR::Const *c = rhs->asConst()) { // Y = X + constant -> Y = X; Y += [constant-address] + as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); + Address addr = as->loadConstant(c, JITAssembler::ScratchRegister); + as->addDouble(addr, targetReg); + return true; + } + if (IR::Temp *t = rhs->asTemp()) { // Y = X + [temp-memory-address] -> Y = X; Y += [temp-memory-address] + if (t->kind != IR::Temp::PhysicalRegister) { + as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); + as->addDouble(as->loadTempAddress(t), targetReg); + return true; + } + } + return false; + } + static bool doubleMul(JITAssembler *as, IR::Expr *lhs, IR::Expr *rhs, FPRegisterID targetReg) + { + if (IR::Const *c = rhs->asConst()) { // Y = X * constant -> Y = X; Y *= [constant-address] + as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); + Address addr = as->loadConstant(c, JITAssembler::ScratchRegister); + as->mulDouble(addr, targetReg); + return true; + } + if (IR::Temp *t = rhs->asTemp()) { // Y = X * [temp-memory-address] -> Y = X; Y *= [temp-memory-address] + if (t->kind != IR::Temp::PhysicalRegister) { + as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); + as->mulDouble(as->loadTempAddress(t), targetReg); + return true; + } + } + return false; + } + static bool doubleSub(JITAssembler *as, IR::Expr *lhs, IR::Expr *rhs, FPRegisterID targetReg) + { + if (IR::Const *c = rhs->asConst()) { // Y = X - constant -> Y = X; Y -= [constant-address] + as->moveDouble(as->toDoubleRegister(lhs, targetReg), 
targetReg); + Address addr = as->loadConstant(c, JITAssembler::ScratchRegister); + as->subDouble(addr, targetReg); + return true; + } + if (IR::Temp *t = rhs->asTemp()) { // Y = X - [temp-memory-address] -> Y = X; Y -= [temp-memory-address] + if (t->kind != IR::Temp::PhysicalRegister) { + as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); + as->subDouble(as->loadTempAddress(t), targetReg); + return true; + } + } + return false; + } + static bool doubleDiv(JITAssembler *as, IR::Expr *lhs, IR::Expr *rhs, FPRegisterID targetReg) + { + if (IR::Const *c = rhs->asConst()) { // Y = X / constant -> Y = X; Y /= [constant-address] + as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); + Address addr = as->loadConstant(c, JITAssembler::ScratchRegister); + as->divDouble(addr, targetReg); + return true; + } + if (IR::Temp *t = rhs->asTemp()) { // Y = X / [temp-memory-address] -> Y = X; Y /= [temp-memory-address] + if (t->kind != IR::Temp::PhysicalRegister) { + as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); + as->divDouble(as->loadTempAddress(t), targetReg); + return true; + } + } + return false; + } +}; +#endif #define OP(op) \ { "Runtime::" isel_stringIfy(op), offsetof(QV4::Runtime, op), INT_MIN, 0, 0, QV4::Runtime::Method_##op##_NeedsExceptionCheck } @@ -162,21 +282,9 @@ void Binop<JITAssembler>::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *ta if (lhs->asConst()) std::swap(lhs, rhs); // Y = constant + X -> Y = X + constant -#if CPU(X86) - if (IR::Const *c = rhs->asConst()) { // Y = X + constant -> Y = X; Y += [constant-address] - as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); - Address addr = as->loadConstant(c, JITAssembler::ScratchRegister); - as->addDouble(addr, targetReg); + if (ArchitectureSpecificBinaryOperation<JITAssembler>::doubleAdd(as, lhs, rhs, targetReg)) break; - } - if (IR::Temp *t = rhs->asTemp()) { // Y = X + [temp-memory-address] -> Y = X; Y += [temp-memory-address] - if (t->kind != 
IR::Temp::PhysicalRegister) { - as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); - as->addDouble(as->loadTempAddress(t), targetReg); - break; - } - } -#endif + as->addDouble(as->toDoubleRegister(lhs, JITAssembler::FPGpr0), as->toDoubleRegister(rhs, JITAssembler::FPGpr1), targetReg); break; @@ -184,40 +292,16 @@ void Binop<JITAssembler>::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *ta if (lhs->asConst()) std::swap(lhs, rhs); // Y = constant * X -> Y = X * constant -#if CPU(X86) - if (IR::Const *c = rhs->asConst()) { // Y = X * constant -> Y = X; Y *= [constant-address] - as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); - Address addr = as->loadConstant(c, JITAssembler::ScratchRegister); - as->mulDouble(addr, targetReg); + if (ArchitectureSpecificBinaryOperation<JITAssembler>::doubleMul(as, lhs, rhs, targetReg)) break; - } - if (IR::Temp *t = rhs->asTemp()) { // Y = X * [temp-memory-address] -> Y = X; Y *= [temp-memory-address] - if (t->kind != IR::Temp::PhysicalRegister) { - as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); - as->mulDouble(as->loadTempAddress(t), targetReg); - break; - } - } -#endif + as->mulDouble(as->toDoubleRegister(lhs, JITAssembler::FPGpr0), as->toDoubleRegister(rhs, JITAssembler::FPGpr1), targetReg); break; case IR::OpSub: -#if CPU(X86) - if (IR::Const *c = rhs->asConst()) { // Y = X - constant -> Y = X; Y -= [constant-address] - as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); - Address addr = as->loadConstant(c, JITAssembler::ScratchRegister); - as->subDouble(addr, targetReg); + if (ArchitectureSpecificBinaryOperation<JITAssembler>::doubleSub(as, lhs, rhs, targetReg)) break; - } - if (IR::Temp *t = rhs->asTemp()) { // Y = X - [temp-memory-address] -> Y = X; Y -= [temp-memory-address] - if (t->kind != IR::Temp::PhysicalRegister) { - as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); - as->subDouble(as->loadTempAddress(t), targetReg); - break; - } - } -#endif + 
if (rhs->asTemp() && rhs->asTemp()->kind == IR::Temp::PhysicalRegister && targetTemp && targetTemp->kind == IR::Temp::PhysicalRegister @@ -231,21 +315,8 @@ void Binop<JITAssembler>::doubleBinop(IR::Expr *lhs, IR::Expr *rhs, IR::Expr *ta break; case IR::OpDiv: -#if CPU(X86) - if (IR::Const *c = rhs->asConst()) { // Y = X / constant -> Y = X; Y /= [constant-address] - as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); - Address addr = as->loadConstant(c, JITAssembler::ScratchRegister); - as->divDouble(addr, targetReg); + if (ArchitectureSpecificBinaryOperation<JITAssembler>::doubleDiv(as, lhs, rhs, targetReg)) break; - } - if (IR::Temp *t = rhs->asTemp()) { // Y = X / [temp-memory-address] -> Y = X; Y /= [temp-memory-address] - if (t->kind != IR::Temp::PhysicalRegister) { - as->moveDouble(as->toDoubleRegister(lhs, targetReg), targetReg); - as->divDouble(as->loadTempAddress(t), targetReg); - break; - } - } -#endif if (rhs->asTemp() && rhs->asTemp()->kind == IR::Temp::PhysicalRegister && targetTemp @@ -577,8 +648,19 @@ typename JITAssembler::Jump Binop<JITAssembler>::genInlineBinop(IR::Expr *leftSo } template struct QV4::JIT::Binop<QV4::JIT::Assembler<DefaultAssemblerTargetConfiguration>>; -#if defined(V4_BOOTSTRAP) && CPU(X86_64) +#if defined(V4_BOOTSTRAP) +#if !CPU(ARM_THUMB2) template struct QV4::JIT::Binop<QV4::JIT::Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerARMv7, NoOperatingSystemSpecialization>>>; #endif +#if !CPU(ARM64) +template struct QV4::JIT::Binop<QV4::JIT::Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerARM64, NoOperatingSystemSpecialization>>>; +#endif +#endif + +} // end of namespace JIT +} // end of namespace QV4 + +QT_END_NAMESPACE + #endif diff --git a/src/qml/jit/qv4isel_masm.cpp b/src/qml/jit/qv4isel_masm.cpp index b1134d2bec..69d6951bb9 100644 --- a/src/qml/jit/qv4isel_masm.cpp +++ b/src/qml/jit/qv4isel_masm.cpp @@ -114,37 +114,12 @@ void InstructionSelection<JITAssembler>::run(int functionIndex) 
fpRegistersToSave.size()); _as->enterStandardStackFrame(regularRegistersToSave, fpRegistersToSave); -#ifdef ARGUMENTS_IN_REGISTERS - _as->move(_as->registerForArgument(0), JITTargetPlatform::EngineRegister); -#else - _as->loadPtr(addressForArgument(0), JITTargetPlatform::EngineRegister); -#endif - - const int locals = _as->stackLayout().calculateJSStackFrameSize(); - if (locals > 0) { - _as->loadPtr(Address(JITTargetPlatform::EngineRegister, qOffsetOf(ExecutionEngine, jsStackTop)), JITTargetPlatform::LocalsRegister); -#ifdef VALUE_FITS_IN_REGISTER - _as->move(TrustedImm64(0), JITTargetPlatform::ReturnValueRegister); - _as->move(TrustedImm32(locals), JITTargetPlatform::ScratchRegister); - Label loop = _as->label(); - _as->store64(JITTargetPlatform::ReturnValueRegister, Address(JITTargetPlatform::LocalsRegister)); - _as->add64(TrustedImm32(8), JITTargetPlatform::LocalsRegister); - Jump jump = _as->branchSub32(ResultCondition::NonZero, TrustedImm32(1), JITTargetPlatform::ScratchRegister); - jump.linkTo(loop, _as); -#else - _as->move(TrustedImm32(0), JITTargetPlatform::ReturnValueRegister); - _as->move(TrustedImm32(locals), JITTargetPlatform::ScratchRegister); - Label loop = _as->label(); - _as->store32(JITTargetPlatform::ReturnValueRegister, Address(JITTargetPlatform::LocalsRegister)); - _as->add32(TrustedImm32(4), JITTargetPlatform::LocalsRegister); - _as->store32(JITTargetPlatform::ReturnValueRegister, Address(JITTargetPlatform::LocalsRegister)); - _as->add32(TrustedImm32(4), JITTargetPlatform::LocalsRegister); - Jump jump = _as->branchSub32(ResultCondition::NonZero, TrustedImm32(1), JITTargetPlatform::ScratchRegister); - jump.linkTo(loop, _as); -#endif - _as->storePtr(JITTargetPlatform::LocalsRegister, Address(JITTargetPlatform::EngineRegister, qOffsetOf(ExecutionEngine, jsStackTop))); - } + if (JITTargetPlatform::RegisterArgumentCount > 0) + _as->move(_as->registerForArgument(0), JITTargetPlatform::EngineRegister); + else + _as->loadPtr(addressForArgument(0), 
JITTargetPlatform::EngineRegister); + _as->initializeLocalVariables(); int lastLine = 0; for (int i = 0, ei = _function->basicBlockCount(); i != ei; ++i) { @@ -474,13 +449,7 @@ void InstructionSelection<JITAssembler>::loadThisObject(IR::Expr *temp) { _as->loadPtr(Address(JITTargetPlatform::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), JITTargetPlatform::ScratchRegister); _as->loadPtr(Address(JITTargetPlatform::ScratchRegister, qOffsetOf(ExecutionContext::Data, callData)), JITTargetPlatform::ScratchRegister); -#if defined(VALUE_FITS_IN_REGISTER) - _as->load64(Pointer(JITTargetPlatform::ScratchRegister, qOffsetOf(CallData, thisObject)), - JITTargetPlatform::ReturnValueRegister); - _as->storeReturnValue(temp); -#else - _as->copyValue(temp, Pointer(JITTargetPlatform::ScratchRegister, qOffsetOf(CallData, thisObject))); -#endif + _as->copyValue(temp, Address(JITTargetPlatform::ScratchRegister, qOffsetOf(CallData, thisObject))); } template <typename JITAssembler> @@ -1374,12 +1343,10 @@ void InstructionSelection<JITAssembler>::calculateRegistersToSave(const Register fpRegistersToSave.clear(); for (const RegisterInfo &ri : JITTargetPlatform::getRegisterInfo()) { -#if defined(RESTORE_EBX_ON_CALL) - if (ri.isRegularRegister() && ri.reg<JSC::X86Registers::RegisterID>() == JSC::X86Registers::ebx) { + if (JITTargetPlatform::gotRegister != -1 && ri.isRegularRegister() && ri.reg<RegisterID>() == JITTargetPlatform::gotRegister) { regularRegistersToSave.append(ri); continue; } -#endif // RESTORE_EBX_ON_CALL if (ri.isCallerSaved()) continue; if (ri.isRegularRegister()) { @@ -1666,20 +1633,21 @@ QT_BEGIN_NAMESPACE namespace QV4 { namespace JIT { template class Q_QML_EXPORT InstructionSelection<>; template class Q_QML_EXPORT ISelFactory<>; -#if defined(V4_BOOTSTRAP) && CPU(X86_64) +#if defined(V4_BOOTSTRAP) Q_QML_EXPORT QV4::EvalISelFactory *createISelForArchitecture(const QString &architecture) { using ARMv7CrossAssembler = 
QV4::JIT::Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerARMv7, NoOperatingSystemSpecialization>>; + using ARM64CrossAssembler = QV4::JIT::Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerARM64, NoOperatingSystemSpecialization>>; if (architecture == QLatin1String("armv7")) return new ISelFactory<ARMv7CrossAssembler>; + else if (architecture == QLatin1String("armv8")) + return new ISelFactory<ARM64CrossAssembler>; QString hostArch; #if CPU(ARM_THUMB2) hostArch = QStringLiteral("armv7"); -#elif CPU(ARM64) - hostArch = QStringLiteral("armv8"); #elif CPU(MIPS) hostArch = QStringLiteral("mips"); #elif CPU(X86) diff --git a/src/qml/jit/qv4isel_masm_p.h b/src/qml/jit/qv4isel_masm_p.h index 6ae50c3260..5c046cb397 100644 --- a/src/qml/jit/qv4isel_masm_p.h +++ b/src/qml/jit/qv4isel_masm_p.h @@ -155,7 +155,6 @@ protected: using JITTargetPlatform = typename JITAssembler::JITTargetPlatform; -#if !defined(ARGUMENTS_IN_REGISTERS) Address addressForArgument(int index) const { // FramePointerRegister points to its old value on the stack, and above @@ -163,7 +162,6 @@ protected: // values before reaching the first argument. 
return Address(JITTargetPlatform::FramePointerRegister, (index + 2) * sizeof(void*)); } -#endif Pointer baseAddressForCallArguments() { diff --git a/src/qml/jit/qv4targetplatform_p.h b/src/qml/jit/qv4targetplatform_p.h index 1c29aa2a70..725567fa05 100644 --- a/src/qml/jit/qv4targetplatform_p.h +++ b/src/qml/jit/qv4targetplatform_p.h @@ -97,6 +97,7 @@ public: using PlatformAssembler = JSC::MacroAssemblerX86; using RegisterID = PlatformAssembler::RegisterID; using FPRegisterID = PlatformAssembler::FPRegisterID; + using TrustedImm32 = PlatformAssembler::TrustedImm32; enum { RegAllocIsSupported = 1 }; @@ -130,10 +131,8 @@ public: } # define HAVE_ALU_OPS_WITH_MEM_OPERAND 1 -# undef VALUE_FITS_IN_REGISTER static const int RegisterSize = 4; -# undef ARGUMENTS_IN_REGISTERS static const int RegisterArgumentCount = 0; static RegisterID registerForArgument(int) { Q_UNREACHABLE(); } @@ -141,15 +140,18 @@ public: static const int StackShadowSpace = 0; static const int StackSpaceAllocatedUponFunctionEntry = RegisterSize; // Return address is pushed onto stack by the CPU. 
static void platformEnterStandardStackFrame(PlatformAssembler *as) { as->push(FramePointerRegister); } - static void platformLeaveStandardStackFrame(PlatformAssembler *as) { as->pop(FramePointerRegister); } + static void platformLeaveStandardStackFrame(PlatformAssembler *as, int frameSize) + { + if (frameSize > 0) + as->add32(TrustedImm32(frameSize), StackPointerRegister); + as->pop(FramePointerRegister); + } #if OS(WINDOWS) || OS(QNX) || \ ((OS(LINUX) || OS(FREEBSD)) && (defined(__PIC__) || defined(__PIE__))) -#define RESTORE_EBX_ON_CALL - using Address = PlatformAssembler::Address; - static Address ebxAddressOnStack() - { + static const int gotRegister = JSC::X86Registers::ebx; + static int savedGOTRegisterSlotOnStack() { static int ebxIdx = -1; if (ebxIdx == -1) { int calleeSaves = 0; @@ -165,8 +167,11 @@ public: Q_ASSERT(ebxIdx >= 0); ebxIdx += 1; } - return Address(FramePointerRegister, ebxIdx * -int(sizeof(void*))); + return ebxIdx * -int(sizeof(void*)); } +#else + static const int gotRegister = -1; + static int savedGOTRegisterSlotOnStack() { return -1; } #endif }; #endif // x86 @@ -179,6 +184,7 @@ public: using PlatformAssembler = JSC::MacroAssemblerX86_64; using RegisterID = PlatformAssembler::RegisterID; using FPRegisterID = PlatformAssembler::FPRegisterID; + using TrustedImm32 = PlatformAssembler::TrustedImm32; enum { RegAllocIsSupported = 1 }; @@ -216,10 +222,8 @@ public: } #define HAVE_ALU_OPS_WITH_MEM_OPERAND 1 -#define VALUE_FITS_IN_REGISTER static const int RegisterSize = 8; -#define ARGUMENTS_IN_REGISTERS static const int RegisterArgumentCount = 6; static RegisterID registerForArgument(int index) { @@ -239,7 +243,15 @@ public: static const int StackShadowSpace = 0; static const int StackSpaceAllocatedUponFunctionEntry = RegisterSize; // Return address is pushed onto stack by the CPU. 
static void platformEnterStandardStackFrame(PlatformAssembler *as) { as->push(FramePointerRegister); } - static void platformLeaveStandardStackFrame(PlatformAssembler *as) { as->pop(FramePointerRegister); } + static void platformLeaveStandardStackFrame(PlatformAssembler *as, int frameSize) + { + if (frameSize > 0) + as->add64(TrustedImm32(frameSize), StackPointerRegister); + as->pop(FramePointerRegister); + } + + static const int gotRegister = -1; + static int savedGOTRegisterSlotOnStack() { return -1; } }; #endif // Linux/MacOS on x86_64 @@ -251,6 +263,7 @@ public: using PlatformAssembler = JSC::MacroAssemblerX86_64; using RegisterID = PlatformAssembler::RegisterID; using FPRegisterID = PlatformAssembler::FPRegisterID; + using TrustedImm32 = PlatformAssembler::TrustedImm32; // Register allocation is not (yet) supported on win64, because the ABI related stack handling // is not completely implemented. Specifically, the saving of xmm registers, and the saving of @@ -285,10 +298,8 @@ public: } #define HAVE_ALU_OPS_WITH_MEM_OPERAND 1 -#define VALUE_FITS_IN_REGISTER static const int RegisterSize = 8; -#define ARGUMENTS_IN_REGISTERS static const int RegisterArgumentCount = 4; static RegisterID registerForArgument(int index) { @@ -306,7 +317,15 @@ public: static const int StackShadowSpace = 32; static const int StackSpaceAllocatedUponFunctionEntry = RegisterSize; // Return address is pushed onto stack by the CPU. 
static void platformEnterStandardStackFrame(PlatformAssembler *as) { as->push(FramePointerRegister); } - static void platformLeaveStandardStackFrame(PlatformAssembler *as) { as->pop(FramePointerRegister); } + static void platformLeaveStandardStackFrame(PlatformAssembler *as, int frameSize) + { + if (frameSize > 0) + as->add64(TrustedImm32(frameSize), StackPointerRegister); + as->pop(FramePointerRegister); + } + + static const int gotRegister = -1; + static int savedGOTRegisterSlotOnStack() { return -1; } }; #endif // Windows on x86_64 @@ -318,6 +337,7 @@ public: using PlatformAssembler = JSC::MacroAssemblerARMv7; using RegisterID = PlatformAssembler::RegisterID; using FPRegisterID = PlatformAssembler::FPRegisterID; + using TrustedImm32 = PlatformAssembler::TrustedImm32; enum { RegAllocIsSupported = 1 }; @@ -389,10 +409,8 @@ public: } #undef HAVE_ALU_OPS_WITH_MEM_OPERAND -#undef VALUE_FITS_IN_REGISTER static const int RegisterSize = 4; -#define ARGUMENTS_IN_REGISTERS static const int RegisterArgumentCount = 4; static RegisterID registerForArgument(int index) { @@ -417,15 +435,24 @@ public: as->push(FramePointerRegister); } - static void platformLeaveStandardStackFrame(PlatformAssembler *as) + static void platformLeaveStandardStackFrame(PlatformAssembler *as, int frameSize) { + if (frameSize > 0) { + // Work around bug in ARMv7Assembler.h where add32(imm, sp, sp) doesn't + // work well for large immediates. 
+ as->move(TrustedImm32(frameSize), JSC::ARMRegisters::r3); + as->add32(JSC::ARMRegisters::r3, StackPointerRegister); + } as->pop(FramePointerRegister); as->pop(JSC::ARMRegisters::lr); } + + static const int gotRegister = -1; + static int savedGOTRegisterSlotOnStack() { return -1; } }; #endif // ARM (32 bit) -#if CPU(ARM64) +#if CPU(ARM64) || defined(V4_BOOTSTRAP) template <> class TargetPlatform<JSC::MacroAssemblerARM64, NoOperatingSystemSpecialization> { @@ -433,6 +460,7 @@ public: using PlatformAssembler = JSC::MacroAssemblerARM64; using RegisterID = PlatformAssembler::RegisterID; using FPRegisterID = PlatformAssembler::FPRegisterID; + using TrustedImm32 = PlatformAssembler::TrustedImm32; enum { RegAllocIsSupported = 1 }; @@ -510,10 +538,8 @@ public: } #undef HAVE_ALU_OPS_WITH_MEM_OPERAND -#define VALUE_FITS_IN_REGISTER static const int RegisterSize = 8; -#define ARGUMENTS_IN_REGISTERS static const int RegisterArgumentCount = 8; static RegisterID registerForArgument(int index) { @@ -541,10 +567,15 @@ public: as->pushPair(FramePointerRegister, JSC::ARM64Registers::lr); } - static void platformLeaveStandardStackFrame(PlatformAssembler *as) + static void platformLeaveStandardStackFrame(PlatformAssembler *as, int frameSize) { + if (frameSize > 0) + as->add64(TrustedImm32(frameSize), StackPointerRegister); as->popPair(FramePointerRegister, JSC::ARM64Registers::lr); } + + static const int gotRegister = -1; + static int savedGOTRegisterSlotOnStack() { return -1; } }; #endif // ARM64 @@ -556,6 +587,7 @@ public: using PlatformAssembler = JSC::MacroAssemblerMIPS; using RegisterID = PlatformAssembler::RegisterID; using FPRegisterID = PlatformAssembler::FPRegisterID; + using TrustedImm32 = PlatformAssembler::TrustedImm32; enum { RegAllocIsSupported = 1 }; static const RegisterID FramePointerRegister = JSC::MIPSRegisters::fp; @@ -598,10 +630,8 @@ public: } #undef HAVE_ALU_OPS_WITH_MEM_OPERAND -#undef VALUE_FITS_IN_REGISTER static const int RegisterSize = 4; -#define 
ARGUMENTS_IN_REGISTERS static const int RegisterArgumentCount = 4; static RegisterID registerForArgument(int index) { @@ -626,11 +656,17 @@ public: as->push(FramePointerRegister); } - static void platformLeaveStandardStackFrame(PlatformAssembler *as) + static void platformLeaveStandardStackFrame(PlatformAssembler *as, int frameSize) { + if (frameSize > 0) + as->add32(TrustedImm32(frameSize), StackPointerRegister); as->pop(FramePointerRegister); as->pop(JSC::MIPSRegisters::ra); } + + + static const int gotRegister = -1; + static int savedGOTRegisterSlotOnStack() { return -1; } }; #endif // Linux on MIPS (32 bit) diff --git a/src/qml/jit/qv4unop.cpp b/src/qml/jit/qv4unop.cpp index 31355e5dce..76c6457d67 100644 --- a/src/qml/jit/qv4unop.cpp +++ b/src/qml/jit/qv4unop.cpp @@ -145,8 +145,13 @@ void Unop<JITAssembler>::generateCompl(IR::Expr *source, IR::Expr *target) } template struct QV4::JIT::Unop<QV4::JIT::Assembler<DefaultAssemblerTargetConfiguration>>; -#if defined(V4_BOOTSTRAP) && CPU(X86_64) +#if defined(V4_BOOTSTRAP) +#if !CPU(ARM_THUMB2) template struct QV4::JIT::Unop<QV4::JIT::Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerARMv7, NoOperatingSystemSpecialization>>>; #endif +#if !CPU(ARM64) +template struct QV4::JIT::Unop<QV4::JIT::Assembler<AssemblerTargetConfiguration<JSC::MacroAssemblerARM64, NoOperatingSystemSpecialization>>>; +#endif +#endif #endif diff --git a/src/qml/jsruntime/qv4object.cpp b/src/qml/jsruntime/qv4object.cpp index b95bfb85a7..dd3bbccde3 100644 --- a/src/qml/jsruntime/qv4object.cpp +++ b/src/qml/jsruntime/qv4object.cpp @@ -61,7 +61,8 @@ DEFINE_OBJECT_VTABLE(Object); void Object::setInternalClass(InternalClass *ic) { d()->internalClass = ic; - if ((!d()->memberData && ic->size) || (d()->memberData->size < ic->size)) + bool hasMD = d()->memberData != nullptr; + if ((!hasMD && ic->size) || (hasMD && d()->memberData->size < ic->size)) d()->memberData = MemberData::allocate(ic->engine, ic->size, d()->memberData); } diff --git 
a/src/qml/jsruntime/qv4runtime.cpp b/src/qml/jsruntime/qv4runtime.cpp index 7f184f8221..6590054bf3 100644 --- a/src/qml/jsruntime/qv4runtime.cpp +++ b/src/qml/jsruntime/qv4runtime.cpp @@ -1264,12 +1264,16 @@ ReturnedValue Runtime::method_unwindException(ExecutionEngine *engine) */ void Runtime::method_pushWithScope(const Value &o, NoThrowEngine *engine) { - engine->pushContext(engine->currentContext->newWithContext(o.toObject(engine))); + QV4::Value *v = engine->jsAlloca(1); + Heap::Object *withObject = o.toObject(engine); + *v = withObject; + engine->pushContext(engine->currentContext->newWithContext(withObject)); Q_ASSERT(engine->jsStackTop == engine->currentContext + 2); } void Runtime::method_pushCatchScope(NoThrowEngine *engine, int exceptionVarNameIndex) { + engine->jsAlloca(1); // keep this symmetric with pushWithScope ExecutionContext *c = engine->currentContext; engine->pushContext(c->newCatchContext(c->d()->compilationUnit->runtimeStrings[exceptionVarNameIndex], engine->catchException(0))); Q_ASSERT(engine->jsStackTop == engine->currentContext + 2); @@ -1279,7 +1283,7 @@ void Runtime::method_popScope(NoThrowEngine *engine) { Q_ASSERT(engine->jsStackTop == engine->currentContext + 2); engine->popContext(); - engine->jsStackTop -= 2; + engine->jsStackTop -= 3; } void Runtime::method_declareVar(ExecutionEngine *engine, bool deletable, int nameIndex) diff --git a/src/qml/memory/qv4mm.cpp b/src/qml/memory/qv4mm.cpp index 6330ef6038..1609dd5adb 100644 --- a/src/qml/memory/qv4mm.cpp +++ b/src/qml/memory/qv4mm.cpp @@ -328,40 +328,65 @@ void Chunk::freeAll() void Chunk::sortIntoBins(HeapItem **bins, uint nBins) { +// qDebug() << "sortIntoBins:"; HeapItem *base = realBase(); #if QT_POINTER_SIZE == 8 const int start = 0; #else const int start = 1; #endif +#ifndef QT_NO_DEBUG + uint freeSlots = 0; + uint allocatedSlots = 0; +#endif for (int i = start; i < EntriesInBitmap; ++i) { quintptr usedSlots = (objectBitmap[i]|extendsBitmap[i]); #if QT_POINTER_SIZE == 8 if (!i) 
usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1; #endif - uint index = qCountTrailingZeroBits(usedSlots + 1); - if (index == Bits) - continue; - uint freeStart = i*Bits + index; - usedSlots &= ~((static_cast<quintptr>(1) << index) - 1); - while (i < EntriesInBitmap && !usedSlots) { - ++i; - usedSlots = (objectBitmap[i]|extendsBitmap[i]); +#ifndef QT_NO_DEBUG + allocatedSlots += qPopulationCount(usedSlots); +// qDebug() << hex << " i=" << i << "used=" << usedSlots; +#endif + while (1) { + uint index = qCountTrailingZeroBits(usedSlots + 1); + if (index == Bits) + break; + uint freeStart = i*Bits + index; + usedSlots &= ~((static_cast<quintptr>(1) << index) - 1); + while (!usedSlots) { + ++i; + if (i == EntriesInBitmap) { + usedSlots = (quintptr)-1; + break; + } + usedSlots = (objectBitmap[i]|extendsBitmap[i]); +#ifndef QT_NO_DEBUG + allocatedSlots += qPopulationCount(usedSlots); +// qDebug() << hex << " i=" << i << "used=" << usedSlots; +#endif + } + HeapItem *freeItem = base + freeStart; + + index = qCountTrailingZeroBits(usedSlots); + usedSlots |= (quintptr(1) << index) - 1; + uint freeEnd = i*Bits + index; + uint nSlots = freeEnd - freeStart; +#ifndef QT_NO_DEBUG +// qDebug() << hex << " got free slots from" << freeStart << "to" << freeEnd << "n=" << nSlots << "usedSlots=" << usedSlots; + freeSlots += nSlots; +#endif + Q_ASSERT(freeEnd > freeStart && freeEnd <= NumSlots); + freeItem->freeData.availableSlots = nSlots; + uint bin = qMin(nBins - 1, nSlots); + freeItem->freeData.next = bins[bin]; + bins[bin] = freeItem; } - if (i == EntriesInBitmap) - usedSlots = 1; - HeapItem *freeItem = base + freeStart; - - uint freeEnd = i*Bits + qCountTrailingZeroBits(usedSlots); - uint nSlots = freeEnd - freeStart; - Q_ASSERT(freeEnd > freeStart && freeEnd <= NumSlots); - freeItem->freeData.availableSlots = nSlots; - uint bin = qMin(nBins - 1, nSlots); - freeItem->freeData.next = bins[bin]; - bins[bin] = freeItem; - // DEBUG << "binnig item" << freeItem << 
nSlots << bin << freeItem->freeData.availableSlots; } +#ifndef QT_NO_DEBUG + Q_ASSERT(freeSlots + allocatedSlots == EntriesInBitmap*8*sizeof(quintptr)); +#endif } @@ -427,28 +452,7 @@ HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) { goto done; } } -#if 0 - for (uint b = bin + 1; b < NumBins - 1; ++b) { - if ((m = freeBins[b])) { - Q_ASSERT(binForSlots(m->freeData.availableSlots) == b); - freeBins[b] = m->freeData.next; - // DEBUG << "looking for empty bin" << bin << "size" << size << "found" << b; - uint remainingSlots = m->freeData.availableSlots - slotsRequired; - // DEBUG << "found free slots of size" << m->freeData.availableSlots << m << "remaining" << remainingSlots; - if (remainingSlots < 2) { - // avoid too much fragmentation and rather mark the memory as used - size += remainingSlots*Chunk::SlotSize; - goto done; - } - HeapItem *remainder = m + slotsRequired; - remainder->freeData.availableSlots = remainingSlots; - uint binForRemainder = binForSlots(remainingSlots); - remainder->freeData.next = freeBins[binForRemainder]; - freeBins[binForRemainder] = remainder; - goto done; - } - } -#endif + if (nFree >= slotsRequired) { // use bump allocation Q_ASSERT(nextFree); @@ -467,13 +471,11 @@ HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) { size_t remainingSlots = m->freeData.availableSlots - slotsRequired; // DEBUG << "found large free slots of size" << m->freeData.availableSlots << m << "remaining" << remainingSlots; - if (remainingSlots < 2) { - // avoid too much fragmentation and rather mark the memory as used - size += remainingSlots*Chunk::SlotSize; + if (remainingSlots == 0) goto done; - } + HeapItem *remainder = m + slotsRequired; - if (remainingSlots >= 2*NumBins) { + if (remainingSlots > nFree) { if (nFree) { size_t bin = binForSlots(nFree); nextFree->freeData.next = freeBins[bin]; @@ -493,6 +495,24 @@ HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) { last = &m->freeData.next; } + if 
(slotsRequired < NumBins - 1) { + // check if we can split up another slot + for (size_t i = slotsRequired + 1; i < NumBins - 1; ++i) { + m = freeBins[i]; + if (m) { + freeBins[i] = m->freeData.next; // take it out of the list +// qDebug() << "got item" << slotsRequired << "from slot" << i; + size_t remainingSlots = i - slotsRequired; + Q_ASSERT(remainingSlots < NumBins - 1); + HeapItem *remainder = m + slotsRequired; + remainder->freeData.availableSlots = remainingSlots; + remainder->freeData.next = freeBins[remainingSlots]; + freeBins[remainingSlots] = remainder; + goto done; + } + } + } + if (!m) { if (!forceAllocation) return 0; @@ -622,14 +642,24 @@ MemoryManager::MemoryManager(ExecutionEngine *engine) #endif } +#ifndef QT_NO_DEBUG +static int lastAllocRequestedSlots = 0; +#endif + Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize) { - if (aggressiveGC) + const size_t stringSize = align(sizeof(Heap::String)); +#ifndef QT_NO_DEBUG + lastAllocRequestedSlots = stringSize >> Chunk::SlotSizeShift; +#endif + + bool didGCRun = false; + if (aggressiveGC) { runGC(); + didGCRun = true; + } - const size_t stringSize = align(sizeof(Heap::String)); unmanagedHeapSize += unmanagedSize; - bool didGCRun = false; if (unmanagedHeapSize > unmanagedHeapSizeGCLimit) { runGC(); @@ -655,8 +685,15 @@ Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize) Heap::Base *MemoryManager::allocData(std::size_t size) { - if (aggressiveGC) +#ifndef QT_NO_DEBUG + lastAllocRequestedSlots = size >> Chunk::SlotSizeShift; +#endif + + bool didRunGC = false; + if (aggressiveGC) { runGC(); + didRunGC = true; + } #ifdef DETAILED_MM_STATS willAllocate(size); #endif // DETAILED_MM_STATS @@ -671,7 +708,7 @@ Heap::Base *MemoryManager::allocData(std::size_t size) HeapItem *m = blockAllocator.allocate(size); if (!m) { - if (shouldRunGC()) + if (!didRunGC && shouldRunGC()) runGC(); m = blockAllocator.allocate(size, true); } @@ -817,6 +854,26 @@ bool MemoryManager::shouldRunGC() const 
return false; } +size_t dumpBins(BlockAllocator *b, bool printOutput = true) +{ + size_t totalFragmentedSlots = 0; + if (printOutput) + qDebug() << "Fragmentation map:"; + for (uint i = 0; i < BlockAllocator::NumBins; ++i) { + uint nEntries = 0; + HeapItem *h = b->freeBins[i]; + while (h) { + ++nEntries; + totalFragmentedSlots += h->freeData.availableSlots; + h = h->freeData.next; + } + if (printOutput) + qDebug() << " number of entries in slot" << i << ":" << nEntries; + } + if (printOutput) + qDebug() << " total mem in bins" << totalFragmentedSlots*Chunk::SlotSize; + return totalFragmentedSlots*Chunk::SlotSize; +} void MemoryManager::runGC() { @@ -833,31 +890,56 @@ void MemoryManager::runGC() sweep(); // DEBUG << "RUN GC: allocated:" << allocator.allocatedMem() << "used before" << oldUsed << "used now" << allocator.usedMem(); } else { + bool triggeredByUnmanagedHeap = (unmanagedHeapSize > unmanagedHeapSizeGCLimit); + size_t oldUnmanagedSize = unmanagedHeapSize; const size_t totalMem = getAllocatedMem(); + const size_t usedBefore = getUsedMem(); + const size_t largeItemsBefore = getLargeItemsMem(); + + qDebug() << "========== GC =========="; +#ifndef QT_NO_DEBUG + qDebug() << " Triggered by alloc request of" << lastAllocRequestedSlots << "slots."; +#endif + qDebug() << "Allocated" << totalMem << "bytes in" << blockAllocator.chunks.size() << "chunks"; + qDebug() << "Fragmented memory before GC" << (totalMem - usedBefore); + dumpBins(&blockAllocator); QElapsedTimer t; t.start(); mark(); qint64 markTime = t.restart(); - const size_t usedBefore = getUsedMem(); - const size_t largeItemsBefore = getLargeItemsMem(); sweep(); const size_t usedAfter = getUsedMem(); const size_t largeItemsAfter = getLargeItemsMem(); qint64 sweepTime = t.elapsed(); - qDebug() << "========== GC =========="; + if (triggeredByUnmanagedHeap) { + qDebug() << "triggered by unmanaged heap:"; + qDebug() << " old unmanaged heap size:" << oldUnmanagedSize; + qDebug() << " new unmanaged heap:" << 
unmanagedHeapSize; + qDebug() << " unmanaged heap limit:" << unmanagedHeapSizeGCLimit; + } + size_t memInBins = dumpBins(&blockAllocator); qDebug() << "Marked object in" << markTime << "ms."; qDebug() << "Sweeped object in" << sweepTime << "ms."; - qDebug() << "Allocated" << totalMem << "bytes"; qDebug() << "Used memory before GC:" << usedBefore; qDebug() << "Used memory after GC:" << usedAfter; qDebug() << "Freed up bytes:" << (usedBefore - usedAfter); - qDebug() << "Large item memory before GC:" << largeItemsBefore; - qDebug() << "Large item memory after GC:" << largeItemsAfter; - qDebug() << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter); + size_t lost = blockAllocator.allocatedMem() - memInBins - usedAfter; + if (lost) + qDebug() << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"; + if (largeItemsBefore || largeItemsAfter) { + qDebug() << "Large item memory before GC:" << largeItemsBefore; + qDebug() << "Large item memory after GC:" << largeItemsAfter; + qDebug() << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter); + } qDebug() << "======== End GC ========"; } + + if (aggressiveGC) { + // ensure we don't 'lose' any memory + Q_ASSERT(blockAllocator.allocatedMem() == getUsedMem() + dumpBins(&blockAllocator, false)); + } } size_t MemoryManager::getUsedMem() const
In addition to examining properties like name, width, height, etc., the array elements can also be - assigned to the targetScreen property of Window items, thus serving as an + assigned to the screen property of Window items, thus serving as an alternative to the C++ side's QWindow::setScreen(). This property has been added in Qt 5.9. @@ -585,7 +586,7 @@ The following functions are also on the Qt object. \li application.font \endlist - \sa Screen, Window, Window.targetScreen + \sa Screen, Window, Window.screen */ /*! @@ -2178,6 +2179,27 @@ QString QQmlEngine::offlineStoragePath() const return d->offlineStoragePath; } +/*! + Returns the file path where a \l{QtQuick.LocalStorage}{Local Storage} + database with the identifier \a databaseName is (or would be) located. + + \sa LocalStorage.openDatabaseSync() + \since 5.9 +*/ +QString QQmlEngine::offlineStorageDatabaseFilePath(const QString &databaseName) const +{ + Q_D(const QQmlEngine); + QCryptographicHash md5(QCryptographicHash::Md5); + md5.addData(databaseName.toUtf8()); + return d->offlineStorageDatabaseDirectory() + QLatin1String(md5.result().toHex()); +} + +QString QQmlEnginePrivate::offlineStorageDatabaseDirectory() const +{ + Q_Q(const QQmlEngine); + return q->offlineStoragePath() + QDir::separator() + QLatin1String("Databases") + QDir::separator(); +} + QQmlPropertyCache *QQmlEnginePrivate::createCache(QQmlType *type, int minorVersion) { QList<QQmlType *> types; diff --git a/src/qml/qml/qqmlengine.h b/src/qml/qml/qqmlengine.h index 3102a20fac..8cada954fe 100644 --- a/src/qml/qml/qqmlengine.h +++ b/src/qml/qml/qqmlengine.h @@ -136,6 +136,7 @@ public: void setOfflineStoragePath(const QString& dir); QString offlineStoragePath() const; + QString offlineStorageDatabaseFilePath(const QString &databaseName) const; QUrl baseUrl() const; void setBaseUrl(const QUrl &); diff --git a/src/qml/qml/qqmlengine_p.h b/src/qml/qml/qqmlengine_p.h index 916566b6c7..1bdeacd524 100644 --- a/src/qml/qml/qqmlengine_p.h +++ 
b/src/qml/qml/qqmlengine_p.h @@ -205,6 +205,7 @@ public: inline void deleteInEngineThread(T *); template<typename T> inline static void deleteInEngineThread(QQmlEngine *, T *); + QString offlineStorageDatabaseDirectory() const; // These methods may be called from the loader thread inline QQmlPropertyCache *cache(QQmlType *, int); diff --git a/src/qmldevtools/qtqmldevtoolsglobal_p.h b/src/qmldevtools/qtqmldevtoolsglobal_p.h index 5c803a4b32..e1a01aa78d 100644 --- a/src/qmldevtools/qtqmldevtoolsglobal_p.h +++ b/src/qmldevtools/qtqmldevtoolsglobal_p.h @@ -58,5 +58,12 @@ QT_BEGIN_NAMESPACE #define Q_QML_EXPORT #define Q_QML_PRIVATE_EXPORT +/* Some classes built into QtQmlDevTools are marked Q_AUTOTEST_EXPORT but we + have nothing to export in this static library */ +#if defined(Q_AUTOTEST_EXPORT) +#undef Q_AUTOTEST_EXPORT +#endif +#define Q_AUTOTEST_EXPORT + QT_END_NAMESPACE #endif // QTQMLGLOBAL_P_H diff --git a/src/quick/items/context2d/qquickcanvasitem.cpp b/src/quick/items/context2d/qquickcanvasitem.cpp index b772ed97d2..1167f408f5 100644 --- a/src/quick/items/context2d/qquickcanvasitem.cpp +++ b/src/quick/items/context2d/qquickcanvasitem.cpp @@ -640,6 +640,17 @@ void QQuickCanvasItem::releaseResources() } } +bool QQuickCanvasItem::event(QEvent *event) +{ + switch (event->type()) { + case QEvent::PolishRequest: + polish(); + return true; + default: + return QQuickItem::event(event); + } +} + void QQuickCanvasItem::invalidateSceneGraph() { Q_D(QQuickCanvasItem); @@ -651,6 +662,12 @@ void QQuickCanvasItem::invalidateSceneGraph() d->textureProvider = 0; } +void QQuickCanvasItem::schedulePolish() +{ + auto polishRequestEvent = new QEvent(QEvent::PolishRequest); + QCoreApplication::postEvent(this, polishRequestEvent); +} + void QQuickCanvasItem::componentComplete() { QQuickItem::componentComplete(); @@ -892,8 +909,9 @@ void QQuickCanvasItem::requestAnimationFrame(QQmlV4Function *args) d->animationCallbacks.insert(++id, QV4::PersistentValue(scope.engine, 
f->asReturnedValue())); + // QTBUG-55778: Calling polish directly here can lead to a polish loop if (isVisible()) - polish(); + schedulePolish(); args->setReturnValue(QV4::Encode(id)); } diff --git a/src/quick/items/context2d/qquickcanvasitem_p.h b/src/quick/items/context2d/qquickcanvasitem_p.h index 8af84d0e7c..217ae9bb69 100644 --- a/src/quick/items/context2d/qquickcanvasitem_p.h +++ b/src/quick/items/context2d/qquickcanvasitem_p.h @@ -182,6 +182,7 @@ private Q_SLOTS: void sceneGraphInitialized(); void checkAnimationCallbacks(); void invalidateSceneGraph(); + void schedulePolish(); protected: void componentComplete() Q_DECL_OVERRIDE; @@ -190,6 +191,7 @@ protected: QSGNode *updatePaintNode(QSGNode *, UpdatePaintNodeData *) Q_DECL_OVERRIDE; void geometryChanged(const QRectF &newGeometry, const QRectF &oldGeometry) Q_DECL_OVERRIDE; void releaseResources() Q_DECL_OVERRIDE; + bool event(QEvent *event) Q_DECL_OVERRIDE; private: Q_DECLARE_PRIVATE(QQuickCanvasItem) Q_INVOKABLE void delayedCreate(); diff --git a/src/quick/items/context2d/qquickcontext2d.cpp b/src/quick/items/context2d/qquickcontext2d.cpp index 95fa0c530e..b9b701313e 100644 --- a/src/quick/items/context2d/qquickcontext2d.cpp +++ b/src/quick/items/context2d/qquickcontext2d.cpp @@ -1879,7 +1879,7 @@ void QQuickJSContext2D::method_get_lineWidth(const QV4::BuiltinFunction *, QV4:: QV4::Scoped<QQuickJSContext2D> r(scope, callData->thisObject); CHECK_CONTEXT(r) - RETURN_RESULT(r->d()->context->state.lineWidth); + RETURN_RESULT(QV4::Encode(r->d()->context->state.lineWidth)); } void QQuickJSContext2D::method_set_lineWidth(const QV4::BuiltinFunction *, QV4::Scope &scope, QV4::CallData *callData) @@ -1906,7 +1906,7 @@ void QQuickJSContext2D::method_get_miterLimit(const QV4::BuiltinFunction *, QV4: QV4::Scoped<QQuickJSContext2D> r(scope, callData->thisObject); CHECK_CONTEXT(r) - RETURN_RESULT(r->d()->context->state.miterLimit); + RETURN_RESULT(QV4::Encode(r->d()->context->state.miterLimit)); } void 
QQuickJSContext2D::method_set_miterLimit(const QV4::BuiltinFunction *, QV4::Scope &scope, QV4::CallData *callData) @@ -1933,7 +1933,7 @@ void QQuickJSContext2D::method_get_shadowBlur(const QV4::BuiltinFunction *, QV4: QV4::Scoped<QQuickJSContext2D> r(scope, callData->thisObject); CHECK_CONTEXT(r) - RETURN_RESULT(r->d()->context->state.shadowBlur); + RETURN_RESULT(QV4::Encode(r->d()->context->state.shadowBlur)); } void QQuickJSContext2D::method_set_shadowBlur(const QV4::BuiltinFunction *, QV4::Scope &scope, QV4::CallData *callData) @@ -1990,7 +1990,7 @@ void QQuickJSContext2D::method_get_shadowOffsetX(const QV4::BuiltinFunction *, Q QV4::Scoped<QQuickJSContext2D> r(scope, callData->thisObject); CHECK_CONTEXT(r) - RETURN_RESULT(r->d()->context->state.shadowOffsetX); + RETURN_RESULT(QV4::Encode(r->d()->context->state.shadowOffsetX)); } void QQuickJSContext2D::method_set_shadowOffsetX(const QV4::BuiltinFunction *, QV4::Scope &scope, QV4::CallData *callData) @@ -2016,7 +2016,7 @@ void QQuickJSContext2D::method_get_shadowOffsetY(const QV4::BuiltinFunction *, Q QV4::Scoped<QQuickJSContext2D> r(scope, callData->thisObject); CHECK_CONTEXT(r) - RETURN_RESULT(r->d()->context->state.shadowOffsetY); + RETURN_RESULT(QV4::Encode(r->d()->context->state.shadowOffsetY)); } void QQuickJSContext2D::method_set_shadowOffsetY(const QV4::BuiltinFunction *, QV4::Scope &scope, QV4::CallData *callData) @@ -3043,7 +3043,7 @@ void QQuickJSContext2DPixelData::proto_get_length(const QV4::BuiltinFunction *, if (!r || r->d()->image->isNull()) RETURN_UNDEFINED(); - RETURN_RESULT(r->d()->image->width() * r->d()->image->height() * 4); + RETURN_RESULT(QV4::Encode(r->d()->image->width() * r->d()->image->height() * 4)); } QV4::ReturnedValue QQuickJSContext2DPixelData::getIndexed(const QV4::Managed *m, uint index, bool *hasProperty) diff --git a/src/quick/items/qquickanimatedimage.cpp b/src/quick/items/qquickanimatedimage.cpp index a1833081c8..22ea4774be 100644 --- 
a/src/quick/items/qquickanimatedimage.cpp +++ b/src/quick/items/qquickanimatedimage.cpp @@ -67,7 +67,7 @@ QQuickPixmap* QQuickAnimatedImagePrivate::infoForCurrentFrame(QQmlEngine *engine .arg(current)); } if (!requestedUrl.isEmpty()) { - if (QQuickPixmap::isCached(requestedUrl, QSize())) + if (QQuickPixmap::isCached(requestedUrl, QSize(), QQuickImageProviderOptions())) pixmap = new QQuickPixmap(engine, requestedUrl); else pixmap = new QQuickPixmap(requestedUrl, _movie->currentImage()); diff --git a/src/quick/items/qquickscreen.cpp b/src/quick/items/qquickscreen.cpp index 9347b55c70..20c6973ee1 100644 --- a/src/quick/items/qquickscreen.cpp +++ b/src/quick/items/qquickscreen.cpp @@ -207,9 +207,9 @@ QT_BEGIN_NAMESPACE By default it is set to the value of the QScreen that the window uses. */ -QQuickScreenInfo::QQuickScreenInfo(QObject *parent) - : QObject(parent), - m_screen(nullptr) +QQuickScreenInfo::QQuickScreenInfo(QObject *parent, QScreen *wrappedScreen) + : QObject(parent) + , m_screen(wrappedScreen) { } diff --git a/src/quick/items/qquickscreen_p.h b/src/quick/items/qquickscreen_p.h index 06efb3ab45..99e1466631 100644 --- a/src/quick/items/qquickscreen_p.h +++ b/src/quick/items/qquickscreen_p.h @@ -84,7 +84,7 @@ class Q_AUTOTEST_EXPORT QQuickScreenInfo : public QObject Q_PROPERTY(int virtualY READ virtualY NOTIFY virtualYChanged REVISION 1) public: - QQuickScreenInfo(QObject *parent = nullptr); + QQuickScreenInfo(QObject *parent = nullptr, QScreen *wrappedScreen = nullptr); QString name() const; int width() const; diff --git a/src/quick/items/qquickwindow.cpp b/src/quick/items/qquickwindow.cpp index 0e7c50ffcf..0ebd644d69 100644 --- a/src/quick/items/qquickwindow.cpp +++ b/src/quick/items/qquickwindow.cpp @@ -4172,25 +4172,25 @@ void QQuickWindow::resetOpenGLState() */ /*! - \qmlproperty variant Window::targetScreen + \qmlproperty variant Window::screen - Specifies the screen the window should be placed on. Equivalent to - QWindow::setScreen(). 
+ The screen with which the window is associated. - The value must be an element from the Qt.application.screens array. + If specified before showing a window, will result in the window being shown + on that screen, unless an explicit window position has been set. The value + must be an element from the Qt.application.screens array. - By default the value is null which leads to using the primary screen. - - \note To ensure that the window is associated with the desired screen right - upon the underlying native window's initial creation, make sure this - property is set as early as possible and that the setting of its value is - not deferred. This can be particularly important on embedded platforms - without a windowing system, where only one window per screen is allowed at a - time. + \note To ensure that the window is associated with the desired screen when + the underlying native window is created, make sure this property is set as + early as possible and that the setting of its value is not deferred. This + can be particularly important on embedded platforms without a windowing system, + where only one window per screen is allowed at a time. Setting the screen after + a window has been created does not move the window if the new screen is part of + the same virtual desktop as the old screen. \since 5.9 - \sa QWindow::setScreen(), QScreen, Qt.application + \sa QWindow::setScreen(), QWindow::screen(), QScreen, Qt.application */ /*! 
diff --git a/src/quick/items/qquickwindowmodule.cpp b/src/quick/items/qquickwindowmodule.cpp index 42313e4584..6211b7802f 100644 --- a/src/quick/items/qquickwindowmodule.cpp +++ b/src/quick/items/qquickwindowmodule.cpp @@ -59,7 +59,6 @@ public: : complete(false) , visible(false) , visibility(QQuickWindow::AutomaticVisibility) - , targetScreen(nullptr) { } @@ -67,7 +66,6 @@ public: bool visible; QQuickWindow::Visibility visibility; QV4::PersistentValue rootItemMarker; - QObject *targetScreen; }; QQuickWindowQmlImpl::QQuickWindowQmlImpl(QWindow *parent) @@ -75,6 +73,7 @@ QQuickWindowQmlImpl::QQuickWindowQmlImpl(QWindow *parent) { connect(this, &QWindow::visibleChanged, this, &QQuickWindowQmlImpl::visibleChanged); connect(this, &QWindow::visibilityChanged, this, &QQuickWindowQmlImpl::visibilityChanged); + connect(this, &QWindow::screenChanged, this, &QQuickWindowQmlImpl::screenChanged); } void QQuickWindowQmlImpl::setVisible(bool visible) @@ -175,24 +174,15 @@ void QQuickWindowQmlImpl::setWindowVisibility() } } -QObject *QQuickWindowQmlImpl::targetScreen() const +QObject *QQuickWindowQmlImpl::screen() const { - Q_D(const QQuickWindowQmlImpl); - return d->targetScreen; + return new QQuickScreenInfo(const_cast<QQuickWindowQmlImpl *>(this), QWindow::screen()); } -void QQuickWindowQmlImpl::setTargetScreen(QObject *screen) +void QQuickWindowQmlImpl::setScreen(QObject *screen) { - Q_D(QQuickWindowQmlImpl); - if (d->targetScreen != screen) { - d->targetScreen = screen; - emit targetScreenChanged(); - QQuickScreenInfo *screenWrapper = qobject_cast<QQuickScreenInfo *>(screen); - if (screenWrapper) - setScreen(screenWrapper->wrappedScreen()); - else - setScreen(nullptr); - } + QQuickScreenInfo *screenWrapper = qobject_cast<QQuickScreenInfo *>(screen); + QWindow::setScreen(screenWrapper ? 
screenWrapper->wrappedScreen() : nullptr); } void QQuickWindowModule::defineModule() diff --git a/src/quick/items/qquickwindowmodule_p.h b/src/quick/items/qquickwindowmodule_p.h index 7ca29880ea..16130bc8a0 100644 --- a/src/quick/items/qquickwindowmodule_p.h +++ b/src/quick/items/qquickwindowmodule_p.h @@ -67,7 +67,7 @@ class Q_QUICK_PRIVATE_EXPORT QQuickWindowQmlImpl : public QQuickWindow, public Q Q_PROPERTY(bool visible READ isVisible WRITE setVisible NOTIFY visibleChanged) Q_PROPERTY(Visibility visibility READ visibility WRITE setVisibility NOTIFY visibilityChanged) - Q_PROPERTY(QObject *targetScreen READ targetScreen WRITE setTargetScreen NOTIFY targetScreenChanged REVISION 2) + Q_PROPERTY(QObject *screen READ screen WRITE setScreen NOTIFY screenChanged REVISION 2) public: QQuickWindowQmlImpl(QWindow *parent = Q_NULLPTR); @@ -75,15 +75,15 @@ public: void setVisible(bool visible); void setVisibility(Visibility visibility); - QObject *targetScreen() const; - void setTargetScreen(QObject *screen); + QObject *screen() const; + void setScreen(QObject *screen); static QQuickWindowAttached *qmlAttachedProperties(QObject *object); Q_SIGNALS: void visibleChanged(bool arg); void visibilityChanged(QWindow::Visibility visibility); - Q_REVISION(2) void targetScreenChanged(); + Q_REVISION(2) void screenChanged(); protected: void classBegin() Q_DECL_OVERRIDE; diff --git a/src/quick/scenegraph/coreapi/qsgrendererinterface.cpp b/src/quick/scenegraph/coreapi/qsgrendererinterface.cpp index 3b0b2faf97..0f49e615e4 100644 --- a/src/quick/scenegraph/coreapi/qsgrendererinterface.cpp +++ b/src/quick/scenegraph/coreapi/qsgrendererinterface.cpp @@ -127,7 +127,7 @@ QSGRendererInterface::~QSGRendererInterface() */ /*! - Queries a graphics \a resource. Returns null when the resource in question is + Queries a graphics \a resource in \a window. Returns null when the resource in question is not supported or not available. 
When successful, the returned pointer is either a direct pointer to an diff --git a/src/quick/util/qquickpixmapcache.cpp b/src/quick/util/qquickpixmapcache.cpp index be6d4d18bd..be27cba989 100644 --- a/src/quick/util/qquickpixmapcache.cpp +++ b/src/quick/util/qquickpixmapcache.cpp @@ -1541,9 +1541,9 @@ void QQuickPixmap::clear(QObject *obj) } } -bool QQuickPixmap::isCached(const QUrl &url, const QSize &requestSize) +bool QQuickPixmap::isCached(const QUrl &url, const QSize &requestSize, const QQuickImageProviderOptions &options) { - QQuickPixmapKey key = { &url, &requestSize, QQuickImageProviderOptions() }; + QQuickPixmapKey key = { &url, &requestSize, options }; QQuickPixmapStore *store = pixmapStore(); return store->m_cache.contains(key); diff --git a/src/quick/util/qquickpixmapcache_p.h b/src/quick/util/qquickpixmapcache_p.h index a867771755..93d5a1cf56 100644 --- a/src/quick/util/qquickpixmapcache_p.h +++ b/src/quick/util/qquickpixmapcache_p.h @@ -175,7 +175,7 @@ public: bool connectDownloadProgress(QObject *, int); static void purgeCache(); - static bool isCached(const QUrl &url, const QSize &requestSize); + static bool isCached(const QUrl &url, const QSize &requestSize, const QQuickImageProviderOptions &options); static const QLatin1String itemGrabberScheme; diff --git a/tests/auto/qml/qqmlecmascript/tst_qqmlecmascript.cpp b/tests/auto/qml/qqmlecmascript/tst_qqmlecmascript.cpp index 99fe262cb4..44582817d5 100644 --- a/tests/auto/qml/qqmlecmascript/tst_qqmlecmascript.cpp +++ b/tests/auto/qml/qqmlecmascript/tst_qqmlecmascript.cpp @@ -337,6 +337,7 @@ private slots: void constkw_data(); void constkw(); void redefineGlobalProp(); + void freeze_empty_object(); private: // static void propertyVarWeakRefCallback(v8::Persistent<v8::Value> object, void* parameter); @@ -8289,6 +8290,18 @@ void tst_qqmlecmascript::redefineGlobalProp() } } +void tst_qqmlecmascript::freeze_empty_object() +{ + // this shouldn't crash + QJSEngine engine; + QJSValue v = 
engine.evaluate(QString::fromLatin1( + "var obj = {};\n" + "Object.freeze(obj);\n" + )); + QVERIFY(!v.isError()); + QCOMPARE(v.toBool(), true); +} + QTEST_MAIN(tst_qqmlecmascript) diff --git a/tests/auto/qml/qqmlengine/tst_qqmlengine.cpp b/tests/auto/qml/qqmlengine/tst_qqmlengine.cpp index 9c155eda5b..e170920486 100644 --- a/tests/auto/qml/qqmlengine/tst_qqmlengine.cpp +++ b/tests/auto/qml/qqmlengine/tst_qqmlengine.cpp @@ -56,6 +56,7 @@ private slots: void baseUrl(); void contextForObject(); void offlineStoragePath(); + void offlineDatabaseStoragePath(); void clearComponentCache(); void trimComponentCache(); void trimComponentCache_data(); @@ -252,6 +253,34 @@ void tst_qqmlengine::offlineStoragePath() QCOMPARE(engine.offlineStoragePath(), QDir::homePath()); } +void tst_qqmlengine::offlineDatabaseStoragePath() +{ + // Without these set, QDesktopServices::storageLocation returns + // strings with extra "//" at the end. We set them to ignore this problem. + qApp->setApplicationName("tst_qqmlengine"); + qApp->setOrganizationName("QtProject"); + qApp->setOrganizationDomain("www.qt-project.org"); + + QQmlEngine engine; + QString dataLocation = QStandardPaths::writableLocation(QStandardPaths::DataLocation); + const QString databaseName = QLatin1String("foo"); + QString databaseLocation = engine.offlineStorageDatabaseFilePath(databaseName); + QCOMPARE(dataLocation.isEmpty(), databaseLocation.isEmpty()); + + QDir dir(dataLocation); + dir.mkpath("QML"); + dir.cd("QML"); + dir.mkpath("OfflineStorage"); + dir.cd("OfflineStorage"); + dir.mkpath("Databases"); + dir.cd("Databases"); + QCOMPARE(QFileInfo(databaseLocation).dir().path(), dir.path()); + + QCryptographicHash md5(QCryptographicHash::Md5); + md5.addData(databaseName.toUtf8()); + QCOMPARE(databaseLocation, QDir::toNativeSeparators(dir.filePath(QLatin1String(md5.result().toHex())))); +} + void tst_qqmlengine::clearComponentCache() { QQmlEngine engine; diff --git a/tests/auto/quick/qquickwindow/data/windowWithScreen.qml 
b/tests/auto/quick/qquickwindow/data/windowWithScreen.qml index fdc0be3388..2a5a7b7b76 100644 --- a/tests/auto/quick/qquickwindow/data/windowWithScreen.qml +++ b/tests/auto/quick/qquickwindow/data/windowWithScreen.qml @@ -3,7 +3,7 @@ import QtQuick.Window 2.3 as Window Window.Window { color: "#00FF00" - targetScreen: Qt.application.screens[0] + screen: Qt.application.screens[0] Item { objectName: "item" } diff --git a/tools/qmlcachegen/qmlcachegen.cpp b/tools/qmlcachegen/qmlcachegen.cpp index 4b902eda0f..10d9829520 100644 --- a/tools/qmlcachegen/qmlcachegen.cpp +++ b/tools/qmlcachegen/qmlcachegen.cpp @@ -141,7 +141,8 @@ static bool compileQmlFile(const QString &inputFileName, QV4::EvalISelFactory *i // ### translation binding simplification - QScopedPointer<QV4::EvalInstructionSelection> isel(iselFactory->create(/*engine*/nullptr, /*executable allocator*/nullptr, &irDocument.jsModule, &irDocument.jsGenerator)); + QV4::ExecutableAllocator allocator; + QScopedPointer<QV4::EvalInstructionSelection> isel(iselFactory->create(/*engine*/nullptr, &allocator, &irDocument.jsModule, &irDocument.jsGenerator)); // Disable lookups in non-standalone (aka QML) mode isel->setUseFastLookups(false); irDocument.javaScriptCompilationUnit = isel->compile(/*generate unit*/false); diff --git a/tools/tools.pro b/tools/tools.pro index f3988a909a..1a90b8e776 100644 --- a/tools/tools.pro +++ b/tools/tools.pro @@ -2,7 +2,8 @@ TEMPLATE = subdirs QT_FOR_CONFIG += qml-private SUBDIRS += \ qmlmin \ - qmlimportscanner + qmlimportscanner \ + qmlcachegen !android|android_app { SUBDIRS += \ @@ -27,7 +28,7 @@ SUBDIRS += \ qml.depends = qmlimportscanner qmleasing.depends = qmlimportscanner -# qmlmin, qmlimportscanner & qmlbundle are build tools. +# qmlmin, qmlimportscanner & qmlcachegen are build tools. # qmlscene is needed by the autotests. # qmltestrunner may be useful for manual testing. # qmlplugindump cannot be a build tool, because it loads target plugins. |