about summary refs log tree commit diff stats
path: root/masm
diff options
context:
space:
mode:
author: Simon Hausmann <simon.hausmann@digia.com>  2012-10-17 13:43:58 +0200
committer: Lars Knoll <lars.knoll@digia.com>  2012-10-17 14:06:59 +0200
commit159a394901dda04742cb028e9b1a27f87759ac83 (patch)
treef28ffdba80d4f3a9bdf8a7fbd58f203a169ad6cb /masm
parentc3d6d31543b2070025a10c4f9457d70ebfcfb502 (diff)
Update wtf and masm from upstream (http://svn.webkit.org/repository/webkit/trunk@131582)
This brings in the distinction between loadPtr and load64 in the assembler. Change-Id: Ifd8a8fe727f4fdb56b1d1d8bdd7065604904c472 Reviewed-by: Lars Knoll <lars.knoll@digia.com>
Diffstat (limited to 'masm')
-rw-r--r--  masm/assembler/AbstractMacroAssembler.h  | 48
-rw-r--r--  masm/assembler/LinkBuffer.h              | 3
-rw-r--r--  masm/assembler/MIPSAssembler.h           | 160
-rw-r--r--  masm/assembler/MacroAssembler.h          | 431
-rw-r--r--  masm/assembler/MacroAssemblerARM.h       | 26
-rw-r--r--  masm/assembler/MacroAssemblerARMv7.h     | 56
-rw-r--r--  masm/assembler/MacroAssemblerCodeRef.h   | 8
-rw-r--r--  masm/assembler/MacroAssemblerMIPS.h      | 433
-rw-r--r--  masm/assembler/MacroAssemblerX86.h       | 28
-rw-r--r--  masm/assembler/MacroAssemblerX86Common.h | 8
-rw-r--r--  masm/assembler/MacroAssemblerX86_64.h    | 211
-rw-r--r--  masm/assembler/X86Assembler.h            | 14
-rw-r--r--  masm/masm.pri                            | 3
-rw-r--r--  masm/wtf/Assertions.h                    | 2
-rw-r--r--  masm/wtf/Compiler.h                      | 4
-rw-r--r--  masm/wtf/Noncopyable.h                   | 6
-rw-r--r--  masm/wtf/Platform.h                      | 87
-rw-r--r--  masm/wtf/RefCounted.h                    | 52
-rw-r--r--  masm/wtf/TypeTraits.h                    | 214
-rw-r--r--  masm/wtf/Vector.h                        | 37
20 files changed, 1284 insertions, 547 deletions
diff --git a/masm/assembler/AbstractMacroAssembler.h b/masm/assembler/AbstractMacroAssembler.h
index e6a9df9944..c75adb7e96 100644
--- a/masm/assembler/AbstractMacroAssembler.h
+++ b/masm/assembler/AbstractMacroAssembler.h
@@ -261,6 +261,50 @@ public:
};
+ // TrustedImm64:
+ //
+ // A 64bit immediate operand to an instruction - this is wrapped in a
+ // class requiring explicit construction in order to prevent RegisterIDs
+ // (which are implemented as an enum) from accidentally being passed as
+ // immediate values.
+ struct TrustedImm64 {
+ TrustedImm64() { }
+
+ explicit TrustedImm64(int64_t value)
+ : m_value(value)
+ {
+ }
+
+#if CPU(X86_64)
+ explicit TrustedImm64(TrustedImmPtr ptr)
+ : m_value(ptr.asIntptr())
+ {
+ }
+#endif
+
+ int64_t m_value;
+ };
+
+ struct Imm64 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImm64
+#else
+ public TrustedImm64
+#endif
+ {
+ explicit Imm64(int64_t value)
+ : TrustedImm64(value)
+ {
+ }
+#if CPU(X86_64)
+ explicit Imm64(TrustedImmPtr ptr)
+ : TrustedImm64(ptr)
+ {
+ }
+#endif
+ const TrustedImm64& asTrustedImm64() const { return *this; }
+ };
+
// Section 2: MacroAssembler code buffer handles
//
// The following types are used to reference items in the code buffer
@@ -564,7 +608,7 @@ public:
m_jumps.append(jump);
}
- void append(JumpList& other)
+ void append(const JumpList& other)
{
m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
}
@@ -579,7 +623,7 @@ public:
m_jumps.clear();
}
- const JumpVector& jumps() { return m_jumps; }
+ const JumpVector& jumps() const { return m_jumps; }
private:
JumpVector m_jumps;
diff --git a/masm/assembler/LinkBuffer.h b/masm/assembler/LinkBuffer.h
index 484d3a73fb..770144d64a 100644
--- a/masm/assembler/LinkBuffer.h
+++ b/masm/assembler/LinkBuffer.h
@@ -287,6 +287,9 @@ private:
#define FINALIZE_CODE(linkBufferReference, dataLogArgumentsForHeading) \
FINALIZE_CODE_IF(Options::showDisassembly(), linkBufferReference, dataLogArgumentsForHeading)
+#define FINALIZE_DFG_CODE(linkBufferReference, dataLogArgumentsForHeading) \
+ FINALIZE_CODE_IF(Options::showDFGDisassembly(), linkBufferReference, dataLogArgumentsForHeading)
+
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
diff --git a/masm/assembler/MIPSAssembler.h b/masm/assembler/MIPSAssembler.h
index 65307d9508..30f172fb8b 100644
--- a/masm/assembler/MIPSAssembler.h
+++ b/masm/assembler/MIPSAssembler.h
@@ -227,20 +227,17 @@ public:
void addiu(RegisterID rt, RegisterID rs, int imm)
{
- emitInst(0x24000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
- | (imm & 0xffff));
+ emitInst(0x24000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
}
void addu(RegisterID rd, RegisterID rs, RegisterID rt)
{
- emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
- | (rt << OP_SH_RT));
+ emitInst(0x00000021 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
}
void subu(RegisterID rd, RegisterID rs, RegisterID rt)
{
- emitInst(0x00000023 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
- | (rt << OP_SH_RT));
+ emitInst(0x00000023 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
}
void mult(RegisterID rs, RegisterID rt)
@@ -266,8 +263,7 @@ public:
void mul(RegisterID rd, RegisterID rs, RegisterID rt)
{
#if WTF_MIPS_ISA_AT_LEAST(32)
- emitInst(0x70000002 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
- | (rt << OP_SH_RT));
+ emitInst(0x70000002 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
#else
mult(rs, rt);
mflo(rd);
@@ -276,139 +272,139 @@ public:
void andInsn(RegisterID rd, RegisterID rs, RegisterID rt)
{
- emitInst(0x00000024 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
- | (rt << OP_SH_RT));
+ emitInst(0x00000024 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
}
void andi(RegisterID rt, RegisterID rs, int imm)
{
- emitInst(0x30000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
- | (imm & 0xffff));
+ emitInst(0x30000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
}
void nor(RegisterID rd, RegisterID rs, RegisterID rt)
{
- emitInst(0x00000027 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
- | (rt << OP_SH_RT));
+ emitInst(0x00000027 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
}
void orInsn(RegisterID rd, RegisterID rs, RegisterID rt)
{
- emitInst(0x00000025 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
- | (rt << OP_SH_RT));
+ emitInst(0x00000025 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
}
void ori(RegisterID rt, RegisterID rs, int imm)
{
- emitInst(0x34000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
- | (imm & 0xffff));
+ emitInst(0x34000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
}
void xorInsn(RegisterID rd, RegisterID rs, RegisterID rt)
{
- emitInst(0x00000026 | (rd << OP_SH_RD) | (rs << OP_SH_RS)
- | (rt << OP_SH_RT));
+ emitInst(0x00000026 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
}
void xori(RegisterID rt, RegisterID rs, int imm)
{
- emitInst(0x38000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
- | (imm & 0xffff));
+ emitInst(0x38000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
}
void slt(RegisterID rd, RegisterID rs, RegisterID rt)
{
- emitInst(0x0000002a | (rd << OP_SH_RD) | (rs << OP_SH_RS)
- | (rt << OP_SH_RT));
+ emitInst(0x0000002a | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
}
void sltu(RegisterID rd, RegisterID rs, RegisterID rt)
{
- emitInst(0x0000002b | (rd << OP_SH_RD) | (rs << OP_SH_RS)
- | (rt << OP_SH_RT));
+ emitInst(0x0000002b | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rt << OP_SH_RT));
}
void sltiu(RegisterID rt, RegisterID rs, int imm)
{
- emitInst(0x2c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
- | (imm & 0xffff));
+ emitInst(0x2c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff));
}
void sll(RegisterID rd, RegisterID rt, int shamt)
{
- emitInst(0x00000000 | (rd << OP_SH_RD) | (rt << OP_SH_RT)
- | ((shamt & 0x1f) << OP_SH_SHAMT));
+ emitInst(0x00000000 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
}
void sllv(RegisterID rd, RegisterID rt, int rs)
{
- emitInst(0x00000004 | (rd << OP_SH_RD) | (rt << OP_SH_RT)
- | (rs << OP_SH_RS));
+ emitInst(0x00000004 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
}
void sra(RegisterID rd, RegisterID rt, int shamt)
{
- emitInst(0x00000003 | (rd << OP_SH_RD) | (rt << OP_SH_RT)
- | ((shamt & 0x1f) << OP_SH_SHAMT));
+ emitInst(0x00000003 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
}
void srav(RegisterID rd, RegisterID rt, RegisterID rs)
{
- emitInst(0x00000007 | (rd << OP_SH_RD) | (rt << OP_SH_RT)
- | (rs << OP_SH_RS));
+ emitInst(0x00000007 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
}
void srl(RegisterID rd, RegisterID rt, int shamt)
{
- emitInst(0x00000002 | (rd << OP_SH_RD) | (rt << OP_SH_RT)
- | ((shamt & 0x1f) << OP_SH_SHAMT));
+ emitInst(0x00000002 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | ((shamt & 0x1f) << OP_SH_SHAMT));
}
void srlv(RegisterID rd, RegisterID rt, RegisterID rs)
{
- emitInst(0x00000006 | (rd << OP_SH_RD) | (rt << OP_SH_RT)
- | (rs << OP_SH_RS));
+ emitInst(0x00000006 | (rd << OP_SH_RD) | (rt << OP_SH_RT) | (rs << OP_SH_RS));
+ }
+
+ void lb(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x80000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
}
void lbu(RegisterID rt, RegisterID rs, int offset)
{
- emitInst(0x90000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
- | (offset & 0xffff));
+ emitInst(0x90000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
loadDelayNop();
}
void lw(RegisterID rt, RegisterID rs, int offset)
{
- emitInst(0x8c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
- | (offset & 0xffff));
+ emitInst(0x8c000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
loadDelayNop();
}
void lwl(RegisterID rt, RegisterID rs, int offset)
{
- emitInst(0x88000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
- | (offset & 0xffff));
+ emitInst(0x88000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
loadDelayNop();
}
void lwr(RegisterID rt, RegisterID rs, int offset)
{
- emitInst(0x98000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
- | (offset & 0xffff));
+ emitInst(0x98000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ loadDelayNop();
+ }
+
+ void lh(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0x84000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
loadDelayNop();
}
void lhu(RegisterID rt, RegisterID rs, int offset)
{
- emitInst(0x94000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
- | (offset & 0xffff));
+ emitInst(0x94000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
loadDelayNop();
}
+ void sb(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xa0000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
+ void sh(RegisterID rt, RegisterID rs, int offset)
+ {
+ emitInst(0xa4000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
+ }
+
void sw(RegisterID rt, RegisterID rs, int offset)
{
- emitInst(0xac000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS)
- | (offset & 0xffff));
+ emitInst(0xac000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (offset & 0xffff));
}
void jr(RegisterID rs)
@@ -469,51 +465,43 @@ public:
void addd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
{
- emitInst(0x46200000 | (fd << OP_SH_FD) | (fs << OP_SH_FS)
- | (ft << OP_SH_FT));
+ emitInst(0x46200000 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
}
void subd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
{
- emitInst(0x46200001 | (fd << OP_SH_FD) | (fs << OP_SH_FS)
- | (ft << OP_SH_FT));
+ emitInst(0x46200001 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
}
void muld(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
{
- emitInst(0x46200002 | (fd << OP_SH_FD) | (fs << OP_SH_FS)
- | (ft << OP_SH_FT));
+ emitInst(0x46200002 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
}
void divd(FPRegisterID fd, FPRegisterID fs, FPRegisterID ft)
{
- emitInst(0x46200003 | (fd << OP_SH_FD) | (fs << OP_SH_FS)
- | (ft << OP_SH_FT));
+ emitInst(0x46200003 | (fd << OP_SH_FD) | (fs << OP_SH_FS) | (ft << OP_SH_FT));
}
void lwc1(FPRegisterID ft, RegisterID rs, int offset)
{
- emitInst(0xc4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS)
- | (offset & 0xffff));
+ emitInst(0xc4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
copDelayNop();
}
void ldc1(FPRegisterID ft, RegisterID rs, int offset)
{
- emitInst(0xd4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS)
- | (offset & 0xffff));
+ emitInst(0xd4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
}
void swc1(FPRegisterID ft, RegisterID rs, int offset)
{
- emitInst(0xe4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS)
- | (offset & 0xffff));
+ emitInst(0xe4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
}
void sdc1(FPRegisterID ft, RegisterID rs, int offset)
{
- emitInst(0xf4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS)
- | (offset & 0xffff));
+ emitInst(0xf4000000 | (ft << OP_SH_FT) | (rs << OP_SH_RS) | (offset & 0xffff));
}
void mtc1(RegisterID rt, FPRegisterID fs)
@@ -549,11 +537,21 @@ public:
emitInst(0x46800021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
}
+ void cvtds(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46000021 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
void cvtwd(FPRegisterID fd, FPRegisterID fs)
{
emitInst(0x46200024 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
}
+ void cvtsd(FPRegisterID fd, FPRegisterID fs)
+ {
+ emitInst(0x46200020 | (fd << OP_SH_FD) | (fs << OP_SH_FS));
+ }
+
void ceqd(FPRegisterID fs, FPRegisterID ft)
{
emitInst(0x46200032 | (fs << OP_SH_FS) | (ft << OP_SH_FT));
@@ -663,6 +661,19 @@ public:
unsigned debugOffset() { return m_buffer.debugOffset(); }
+ // Assembly helpers for moving data between fp and registers.
+ void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn)
+ {
+ mfc1(rd1, rn);
+ mfc1(rd2, FPRegisterID(rn + 1));
+ }
+
+ void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2)
+ {
+ mtc1(rn1, rd);
+ mtc1(rn2, FPRegisterID(rd + 1));
+ }
+
static unsigned getCallReturnOffset(AssemblerLabel call)
{
// The return address is after a call and a delay slot instruction
@@ -672,7 +683,7 @@ public:
// Linking & patching:
//
// 'link' and 'patch' methods are for use on unprotected code - such as the code
- // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
// code has been finalized it is (platform support permitting) within a non-
// writable region of memory; to modify the code in an execute-only execuable
// pool the 'repatch' and 'relink' methods should be used.
@@ -846,7 +857,7 @@ private:
MIPSWord* insn = reinterpret_cast<MIPSWord*>(reinterpret_cast<intptr_t>(newBase) + pos);
insn = insn + 2;
// Need to make sure we have 5 valid instructions after pos
- if ((unsigned int)pos >= m_buffer.codeSize() - 5 * sizeof(MIPSWord))
+ if ((unsigned)pos >= m_buffer.codeSize() - 5 * sizeof(MIPSWord))
continue;
if ((*insn & 0xfc000000) == 0x08000000) { // j
@@ -882,11 +893,10 @@ private:
static int linkWithOffset(MIPSWord* insn, void* to)
{
ASSERT((*insn & 0xfc000000) == 0x10000000 // beq
- || (*insn & 0xfc000000) == 0x14000000 // bne
- || (*insn & 0xffff0000) == 0x45010000 // bc1t
- || (*insn & 0xffff0000) == 0x45000000); // bc1f
- intptr_t diff = (reinterpret_cast<intptr_t>(to)
- - reinterpret_cast<intptr_t>(insn) - 4) >> 2;
+ || (*insn & 0xfc000000) == 0x14000000 // bne
+ || (*insn & 0xffff0000) == 0x45010000 // bc1t
+ || (*insn & 0xffff0000) == 0x45000000); // bc1f
+ intptr_t diff = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(insn) - 4) >> 2;
if (diff < -32768 || diff > 32767 || *(insn + 2) != 0x10000003) {
/*
diff --git a/masm/assembler/MacroAssembler.h b/masm/assembler/MacroAssembler.h
index b17e19bb9d..134908387d 100644
--- a/masm/assembler/MacroAssembler.h
+++ b/masm/assembler/MacroAssembler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef MacroAssembler_h
#define MacroAssembler_h
+#include <wtf/Platform.h>
+
#if ENABLE(ASSEMBLER)
#if CPU(ARM_THUMB2)
@@ -68,10 +70,6 @@ public:
using MacroAssemblerBase::pop;
using MacroAssemblerBase::jump;
using MacroAssemblerBase::branch32;
-#if CPU(X86_64)
- using MacroAssemblerBase::branchPtr;
- using MacroAssemblerBase::branchTestPtr;
-#endif
using MacroAssemblerBase::move;
#if ENABLE(JIT_CONSTANT_BLINDING)
@@ -89,6 +87,8 @@ public:
using MacroAssemblerBase::xor32;
#endif
+ static const double twoToThe32; // This is super useful for some double code.
+
// Utilities used by the DFG JIT.
#if ENABLE(DFG_JIT)
using MacroAssemblerBase::invert;
@@ -183,6 +183,23 @@ public:
storePtr(imm, addressForPoke(index));
}
+#if CPU(X86_64)
+ void peek64(RegisterID dest, int index = 0)
+ {
+ load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
+ }
+
+ void poke(TrustedImm64 value, int index = 0)
+ {
+ store64(value, addressForPoke(index));
+ }
+
+ void poke64(RegisterID src, int index = 0)
+ {
+ store64(src, addressForPoke(index));
+ }
+#endif
+
// Backwards banches, these are currently all implemented using existing forwards branch mechanisms.
void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
@@ -230,6 +247,11 @@ public:
}
#if !CPU(ARM_THUMB2)
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+ {
+ return PatchableJump(branchPtr(cond, left, right));
+ }
+
PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
@@ -239,6 +261,16 @@ public:
{
return PatchableJump(jump());
}
+
+ PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return PatchableJump(branchTest32(cond, reg, mask));
+ }
+
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ return PatchableJump(branch32(cond, reg, imm));
+ }
#endif
void jump(Label target)
@@ -486,6 +518,11 @@ public:
return branch32(cond, left, TrustedImm32(right));
}
+ Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub32(cond, src, dest);
+ }
+
Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
{
return branchTest32(cond, reg, mask);
@@ -521,15 +558,295 @@ public:
return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
}
#else
+ void addPtr(RegisterID src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(Address src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add64(imm, srcDest);
+ }
+
+ void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ add64(imm, src, dest);
+ }
+
+ void addPtr(TrustedImm32 imm, Address address)
+ {
+ add64(imm, address);
+ }
+
+ void addPtr(AbsoluteAddress src, RegisterID dest)
+ {
+ add64(src, dest);
+ }
+
+ void addPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ add64(TrustedImm64(imm), dest);
+ }
+
+ void addPtr(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ add64(imm, address);
+ }
+
+ void andPtr(RegisterID src, RegisterID dest)
+ {
+ and64(src, dest);
+ }
+
+ void andPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ and64(imm, srcDest);
+ }
+
+ void negPtr(RegisterID dest)
+ {
+ neg64(dest);
+ }
+
+ void orPtr(RegisterID src, RegisterID dest)
+ {
+ or64(src, dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ or64(imm, dest);
+ }
+
+ void orPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ or64(TrustedImm64(imm), dest);
+ }
+
+ void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ or64(op1, op2, dest);
+ }
+
+ void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ or64(imm, src, dest);
+ }
+
+ void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
+ {
+ rotateRight64(imm, srcDst);
+ }
+
+ void subPtr(RegisterID src, RegisterID dest)
+ {
+ sub64(src, dest);
+ }
+
+ void subPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ sub64(imm, dest);
+ }
+
+ void subPtr(TrustedImmPtr imm, RegisterID dest)
+ {
+ sub64(TrustedImm64(imm), dest);
+ }
+
+ void xorPtr(RegisterID src, RegisterID dest)
+ {
+ xor64(src, dest);
+ }
+
+ void xorPtr(RegisterID src, Address dest)
+ {
+ xor64(src, dest);
+ }
+
+ void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+ {
+ xor64(imm, srcDest);
+ }
+
+ void loadPtr(ImplicitAddress address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ void loadPtr(BaseIndex address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ void loadPtr(const void* address, RegisterID dest)
+ {
+ load64(address, dest);
+ }
+
+ DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load64WithAddressOffsetPatch(address, dest);
+ }
+
+ DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ {
+ return load64WithCompactAddressOffsetPatch(address, dest);
+ }
+
+ void storePtr(RegisterID src, ImplicitAddress address)
+ {
+ store64(src, address);
+ }
+
+ void storePtr(RegisterID src, BaseIndex address)
+ {
+ store64(src, address);
+ }
+
+ void storePtr(RegisterID src, void* address)
+ {
+ store64(src, address);
+ }
+
+ void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+ {
+ store64(TrustedImm64(imm), address);
+ }
+
+ void storePtr(TrustedImmPtr imm, BaseIndex address)
+ {
+ store64(TrustedImm64(imm), address);
+ }
+
+ DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ {
+ return store64WithAddressOffsetPatch(src, address);
+ }
+
+ void movePtrToDouble(RegisterID src, FPRegisterID dest)
+ {
+ move64ToDouble(src, dest);
+ }
+
+ void moveDoubleToPtr(FPRegisterID src, RegisterID dest)
+ {
+ moveDoubleTo64(src, dest);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ {
+ compare64(cond, left, right, dest);
+ }
+
+ void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ compare64(cond, left, right, dest);
+ }
+
+ void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+ {
+ test64(cond, reg, mask, dest);
+ }
+
+ void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+ {
+ test64(cond, reg, mask, dest);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+ {
+ return branch64(cond, left, TrustedImm64(right));
+ }
+
+ Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ {
+ return branch64(cond, left, right);
+ }
+
+ Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+ {
+ return branch64(cond, left, TrustedImm64(right));
+ }
+
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+ {
+ return branchTest64(cond, reg, mask);
+ }
+ Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, reg, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
+ {
+ return branchTest64(cond, address, reg);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, address, mask);
+ }
+
+ Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ return branchTest64(cond, address, mask);
+ }
+
+ Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchAdd64(cond, imm, dest);
+ }
+
+ Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd64(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub64(cond, imm, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub64(cond, src, dest);
+ }
+
+ Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ {
+ return branchSub64(cond, src1, src2, dest);
+ }
+
#if ENABLE(JIT_CONSTANT_BLINDING)
- using MacroAssemblerBase::addPtr;
- using MacroAssemblerBase::andPtr;
- using MacroAssemblerBase::branchSubPtr;
+ using MacroAssemblerBase::and64;
using MacroAssemblerBase::convertInt32ToDouble;
- using MacroAssemblerBase::storePtr;
- using MacroAssemblerBase::subPtr;
- using MacroAssemblerBase::xorPtr;
+ using MacroAssemblerBase::store64;
bool shouldBlindDouble(double value)
{
@@ -539,7 +856,7 @@ public:
// Try to force normalisation, and check that there's no change
// in the bit pattern
- if (bitwise_cast<uintptr_t>(value * 1.0) != bitwise_cast<uintptr_t>(value))
+ if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
return true;
value = abs(value);
@@ -578,7 +895,6 @@ public:
default: {
if (value <= 0xff)
return false;
-#if CPU(X86_64) && 0
JSValue jsValue = JSValue::decode(reinterpret_cast<void*>(value));
if (jsValue.isInt32())
return shouldBlind(Imm32(jsValue.asInt32()));
@@ -587,7 +903,6 @@ public:
if (!shouldBlindDouble(bitwise_cast<double>(value)))
return false;
-#endif
}
}
return shouldBlindForSpecificArch(value);
@@ -617,6 +932,59 @@ public:
rotateRightPtr(constant.rotation, dest);
}
+ bool shouldBlind(Imm64 imm)
+ {
+#if !defined(NDEBUG)
+ UNUSED_PARAM(imm);
+ // Debug always blind all constants, if only so we know
+ // if we've broken blinding during patch development.
+ return true;
+#endif
+
+ // First off we'll special case common, "safe" values to avoid hurting
+ // performance too much
+ uint64_t value = imm.asTrustedImm64().m_value;
+ switch (value) {
+ case 0xffff:
+ case 0xffffff:
+ case 0xffffffffL:
+ case 0xffffffffffL:
+ case 0xffffffffffffL:
+ case 0xffffffffffffffL:
+ case 0xffffffffffffffffL:
+ return false;
+ default: {
+ if (value <= 0xff)
+ return false;
+ }
+ }
+ return shouldBlindForSpecificArch(value);
+ }
+
+ struct RotatedImm64 {
+ RotatedImm64(uint64_t v1, uint8_t v2)
+ : value(v1)
+ , rotation(v2)
+ {
+ }
+ TrustedImm64 value;
+ TrustedImm32 rotation;
+ };
+
+ RotatedImm64 rotationBlindConstant(Imm64 imm)
+ {
+ uint8_t rotation = random() % (sizeof(int64_t) * 8);
+ uint64_t value = imm.asTrustedImm64().m_value;
+ value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
+ return RotatedImm64(value, rotation);
+ }
+
+ void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
+ {
+ move(constant.value, dest);
+ rotateRight64(constant.rotation, dest);
+ }
+
void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
{
if (shouldBlind(imm)) {
@@ -635,6 +1003,24 @@ public:
move(imm.asTrustedImmPtr(), dest);
}
+ void move(Imm64 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm))
+ loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
+ else
+ move(imm.asTrustedImm64(), dest);
+ }
+
+ void and64(Imm32 imm, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = andBlindedConstant(imm);
+ and64(key.value1, dest);
+ and64(key.value2, dest);
+ } else
+ and64(imm.asTrustedImm32(), dest);
+ }
+
Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
{
if (shouldBlind(right)) {
@@ -655,6 +1041,16 @@ public:
storePtr(imm.asTrustedImmPtr(), dest);
}
+ void store64(Imm64 imm, Address dest)
+ {
+ if (shouldBlind(imm)) {
+ RegisterID scratchRegister = scratchRegisterForBlinding();
+ loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
+ store64(scratchRegister, dest);
+ } else
+ store64(imm.asTrustedImm64(), dest);
+ }
+
#endif
#endif // !CPU(X86_64)
@@ -848,6 +1244,13 @@ public:
storePtr(value, addressForPoke(index));
}
+#if CPU(X86_64)
+ void poke(Imm64 value, int index = 0)
+ {
+ store64(value, addressForPoke(index));
+ }
+#endif
+
void store32(Imm32 imm, Address dest)
{
if (shouldBlind(imm)) {
diff --git a/masm/assembler/MacroAssemblerARM.h b/masm/assembler/MacroAssemblerARM.h
index e3b0be9daa..39d94adeaf 100644
--- a/masm/assembler/MacroAssemblerARM.h
+++ b/masm/assembler/MacroAssemblerARM.h
@@ -212,6 +212,14 @@ public:
m_assembler.orrs(dest, dest, src);
}
+ void or32(RegisterID src, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+ load32(Address(ARMRegisters::S0), ARMRegisters::S1);
+ or32(src, ARMRegisters::S1);
+ store32(ARMRegisters::S1, ARMRegisters::S0);
+ }
+
void or32(TrustedImm32 imm, RegisterID dest)
{
m_assembler.orrs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
@@ -437,6 +445,13 @@ public:
m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
}
+ void store8(TrustedImm32 imm, void* address)
+ {
+ move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
+ m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
+ m_assembler.dtrUp(ARMAssembler::StoreUint8, ARMRegisters::S1, ARMRegisters::S0, 0);
+ }
+
void store16(RegisterID src, BaseIndex address)
{
m_assembler.baseIndexTransfer16(ARMAssembler::StoreUint16, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
@@ -657,6 +672,17 @@ public:
load32(Address(ARMRegisters::S0, 0), ARMRegisters::pc);
}
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.vmov(dest1, dest2, src);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ UNUSED_PARAM(scratch);
+ m_assembler.vmov(dest, src1, src2);
+ }
+
Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
diff --git a/masm/assembler/MacroAssemblerARMv7.h b/masm/assembler/MacroAssemblerARMv7.h
index d2da886c27..1301038e57 100644
--- a/masm/assembler/MacroAssemblerARMv7.h
+++ b/masm/assembler/MacroAssemblerARMv7.h
@@ -313,6 +313,14 @@ public:
{
m_assembler.orr(dest, dest, src);
}
+
+ void or32(RegisterID src, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ load32(addressTempRegister, dataTempRegister);
+ or32(src, dataTempRegister);
+ store32(dataTempRegister, addressTempRegister);
+ }
void or32(TrustedImm32 imm, RegisterID dest)
{
@@ -729,11 +737,35 @@ public:
store8(src, setupArmAddress(address));
}
+ void store8(RegisterID src, void* address)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ store8(src, ArmAddress(addressTempRegister, 0));
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ move(imm, dataTempRegister);
+ store8(dataTempRegister, address);
+ }
+
void store16(RegisterID src, BaseIndex address)
{
store16(src, setupArmAddress(address));
}
+ // Possibly clobbers src, but not on this architecture.
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.vmov(dest1, dest2, src);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ UNUSED_PARAM(scratch);
+ m_assembler.vmov(dest, src1, src2);
+ }
+
#if ENABLE(JIT_CONSTANT_BLINDING)
static bool shouldBlindForSpecificArch(uint32_t value)
{
@@ -1652,6 +1684,30 @@ public:
dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
return branch32(cond, addressTempRegister, dataTempRegister);
}
+
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32(cond, left, TrustedImm32(right));
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branchTest32(cond, reg, mask);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32(cond, reg, imm);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
diff --git a/masm/assembler/MacroAssemblerCodeRef.h b/masm/assembler/MacroAssemblerCodeRef.h
index 2978f10f85..c2af24060a 100644
--- a/masm/assembler/MacroAssemblerCodeRef.h
+++ b/masm/assembler/MacroAssemblerCodeRef.h
@@ -134,14 +134,6 @@ public:
ASSERT_VALID_CODE_POINTER(m_value);
}
- template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5, typename argType6>
- FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5, argType6))
- : m_value((void*)value)
- {
- ASSERT_VALID_CODE_POINTER(m_value);
- }
-
-
// MSVC doesn't seem to treat functions with different calling conventions as
// different types; these methods already defined for fastcall, below.
#if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS)
diff --git a/masm/assembler/MacroAssemblerMIPS.h b/masm/assembler/MacroAssemblerMIPS.h
index 8b3ce9f034..fc6f9f40d5 100644
--- a/masm/assembler/MacroAssemblerMIPS.h
+++ b/masm/assembler/MacroAssemblerMIPS.h
@@ -29,8 +29,8 @@
#if ENABLE(ASSEMBLER) && CPU(MIPS)
-#include "MIPSAssembler.h"
#include "AbstractMacroAssembler.h"
+#include "MIPSAssembler.h"
namespace JSC {
@@ -155,12 +155,10 @@ public:
m_assembler.lw(dataTempRegister, address.base, address.offset);
if (imm.m_value >= -32768 && imm.m_value <= 32767
&& !m_fixedWidth)
- m_assembler.addiu(dataTempRegister, dataTempRegister,
- imm.m_value);
+ m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
else {
move(imm, immTempRegister);
- m_assembler.addu(dataTempRegister, dataTempRegister,
- immTempRegister);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
}
m_assembler.sw(dataTempRegister, address.base, address.offset);
} else {
@@ -177,12 +175,10 @@ public:
m_assembler.lw(dataTempRegister, addrTempRegister, address.offset);
if (imm.m_value >= -32768 && imm.m_value <= 32767 && !m_fixedWidth)
- m_assembler.addiu(dataTempRegister, dataTempRegister,
- imm.m_value);
+ m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
else {
move(imm, immTempRegister);
- m_assembler.addu(dataTempRegister, dataTempRegister,
- immTempRegister);
+ m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
}
m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
}
@@ -257,8 +253,7 @@ public:
{
if (!imm.m_value && !m_fixedWidth)
move(MIPSRegisters::zero, dest);
- else if (imm.m_value > 0 && imm.m_value < 65535
- && !m_fixedWidth)
+ else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth)
m_assembler.andi(dest, dest, imm.m_value);
else {
/*
@@ -335,6 +330,13 @@ public:
m_assembler.orInsn(dest, dest, dataTempRegister);
}
+ void or32(RegisterID src, AbsoluteAddress dest)
+ {
+ load32(dest.m_ptr, dataTempRegister);
+ m_assembler.orInsn(dataTempRegister, dataTempRegister, src);
+ store32(dataTempRegister, dest.m_ptr);
+ }
+
void rshift32(RegisterID shiftAmount, RegisterID dest)
{
m_assembler.srav(dest, dest, shiftAmount);
@@ -412,14 +414,11 @@ public:
sw dataTemp, offset(base)
*/
m_assembler.lw(dataTempRegister, address.base, address.offset);
- if (imm.m_value >= -32767 && imm.m_value <= 32768
- && !m_fixedWidth)
- m_assembler.addiu(dataTempRegister, dataTempRegister,
- -imm.m_value);
+ if (imm.m_value >= -32767 && imm.m_value <= 32768 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
else {
move(imm, immTempRegister);
- m_assembler.subu(dataTempRegister, dataTempRegister,
- immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
}
m_assembler.sw(dataTempRegister, address.base, address.offset);
} else {
@@ -437,12 +436,10 @@ public:
if (imm.m_value >= -32767 && imm.m_value <= 32768
&& !m_fixedWidth)
- m_assembler.addiu(dataTempRegister, dataTempRegister,
- -imm.m_value);
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
else {
move(imm, immTempRegister);
- m_assembler.subu(dataTempRegister, dataTempRegister,
- immTempRegister);
+ m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
}
m_assembler.sw(dataTempRegister, addrTempRegister, address.offset);
}
@@ -466,11 +463,9 @@ public:
move(TrustedImmPtr(address.m_ptr), addrTempRegister);
m_assembler.lw(dataTempRegister, addrTempRegister, 0);
- if (imm.m_value >= -32767 && imm.m_value <= 32768
- && !m_fixedWidth) {
- m_assembler.addiu(dataTempRegister, dataTempRegister,
- -imm.m_value);
- } else {
+ if (imm.m_value >= -32767 && imm.m_value <= 32768 && !m_fixedWidth)
+ m_assembler.addiu(dataTempRegister, dataTempRegister, -imm.m_value);
+ else {
move(imm, immTempRegister);
m_assembler.subu(dataTempRegister, dataTempRegister, immTempRegister);
}
@@ -524,7 +519,7 @@ public:
// Memory access operations:
//
// Loads are of the form load(address, destination) and stores of the form
- // store(source, address). The source for a store may be an TrustedImm32. Address
+ // store(source, address). The source for a store may be an TrustedImm32. Address
// operand objects to loads and store will be implicitly constructed if a
// register is passed.
@@ -569,12 +564,39 @@ public:
m_assembler.sll(addrTempRegister, address.index, address.scale);
m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
- m_assembler.addu(addrTempRegister, addrTempRegister,
- immTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
m_assembler.lbu(dest, addrTempRegister, address.offset);
}
}
+ void load8Signed(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lb dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lb(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lb dest, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lb(dest, addrTempRegister, address.offset);
+ }
+ }
+
void load32(ImplicitAddress address, RegisterID dest)
{
if (address.offset >= -32768 && address.offset <= 32767
@@ -615,8 +637,7 @@ public:
m_assembler.sll(addrTempRegister, address.index, address.scale);
m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
- m_assembler.addu(addrTempRegister, addrTempRegister,
- immTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
m_assembler.lw(dest, addrTempRegister, address.offset);
}
}
@@ -668,8 +689,7 @@ public:
m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
m_assembler.lui(immTempRegister, address.offset >> 16);
m_assembler.ori(immTempRegister, immTempRegister, address.offset);
- m_assembler.addu(addrTempRegister, addrTempRegister,
- immTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
#if CPU(BIG_ENDIAN)
m_assembler.lwl(dest, addrTempRegister, 0);
m_assembler.lwr(dest, addrTempRegister, 3);
@@ -756,12 +776,39 @@ public:
m_assembler.sll(addrTempRegister, address.index, address.scale);
m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
- m_assembler.addu(addrTempRegister, addrTempRegister,
- immTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
m_assembler.lhu(dest, addrTempRegister, address.offset);
}
}
+ void load16Signed(BaseIndex address, RegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lh dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lh(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lh dest, (address.offset & 0xffff)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lh(dest, addrTempRegister, address.offset);
+ }
+ }
+
DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
{
m_fixedWidth = true;
@@ -779,6 +826,79 @@ public:
return dataLabel;
}
+ void store8(RegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sb src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sb(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sb src, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sb(src, addrTempRegister, address.offset);
+ }
+ }
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ /*
+ li immTemp, imm
+ li addrTemp, address
+ sb src, 0(addrTemp)
+ */
+ if (!imm.m_value && !m_fixedWidth) {
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sb(MIPSRegisters::zero, addrTempRegister, 0);
+ } else {
+ move(imm, immTempRegister);
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sb(immTempRegister, addrTempRegister, 0);
+ }
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sh src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sh(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sh src, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sh(src, addrTempRegister, address.offset);
+ }
+ }
+
void store32(RegisterID src, ImplicitAddress address)
{
if (address.offset >= -32768 && address.offset <= 32767
@@ -819,8 +939,7 @@ public:
m_assembler.sll(addrTempRegister, address.index, address.scale);
m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
- m_assembler.addu(addrTempRegister, addrTempRegister,
- immTempRegister);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
m_assembler.sw(src, addrTempRegister, address.offset);
}
}
@@ -830,8 +949,7 @@ public:
if (address.offset >= -32768 && address.offset <= 32767
&& !m_fixedWidth) {
if (!imm.m_value)
- m_assembler.sw(MIPSRegisters::zero, address.base,
- address.offset);
+ m_assembler.sw(MIPSRegisters::zero, address.base, address.offset);
else {
move(imm, immTempRegister);
m_assembler.sw(immTempRegister, address.base, address.offset);
@@ -845,12 +963,10 @@ public:
m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16);
m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
if (!imm.m_value && !m_fixedWidth)
- m_assembler.sw(MIPSRegisters::zero, addrTempRegister,
- address.offset);
+ m_assembler.sw(MIPSRegisters::zero, addrTempRegister, address.offset);
else {
move(imm, immTempRegister);
- m_assembler.sw(immTempRegister, addrTempRegister,
- address.offset);
+ m_assembler.sw(immTempRegister, addrTempRegister, address.offset);
}
}
}
@@ -915,9 +1031,9 @@ public:
// Stack manipulation operations:
//
// The ABI is assumed to provide a stack abstraction to memory,
- // containing machine word sized units of data. Push and pop
+ // containing machine word sized units of data. Push and pop
// operations add and remove a single register sized unit of data
- // to or from the stack. Peek and poke operations read or write
+ // to or from the stack. Peek and poke operations read or write
// values on the stack, without moving the current stack position.
void pop(RegisterID dest)
@@ -1201,10 +1317,21 @@ public:
m_fixedWidth = false;
}
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.vmov(dest1, dest2, src);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ UNUSED_PARAM(scratch);
+ m_assembler.vmov(dest, src1, src2);
+ }
+
// Arithmetic control flow operations:
//
// This set of conditional branch operations branch based
- // on the result of an arithmetic operation. The operation
+ // on the result of an arithmetic operation. The operation
// is performed as normal, storing the result.
//
// * jz operations branch if the result is zero.
@@ -1413,7 +1540,7 @@ public:
Call nearCall()
{
- /* We need two words for relaxation. */
+ /* We need two words for relaxation. */
m_assembler.nop();
m_assembler.nop();
m_assembler.jal();
@@ -1501,8 +1628,7 @@ public:
m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister);
} else {
move(mask, immTempRegister);
- m_assembler.andInsn(cmpTempRegister, dataTempRegister,
- immTempRegister);
+ m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister);
if (cond == Zero)
m_assembler.sltiu(dest, cmpTempRegister, 1);
else
@@ -1521,8 +1647,7 @@ public:
m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister);
} else {
move(mask, immTempRegister);
- m_assembler.andInsn(cmpTempRegister, dataTempRegister,
- immTempRegister);
+ m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister);
if (cond == Zero)
m_assembler.sltiu(dest, cmpTempRegister, 1);
else
@@ -1598,6 +1723,34 @@ public:
return tailRecursiveCall();
}
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lwc1 dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lwc1(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lwc1 dest, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lwc1(dest, addrTempRegister, address.offset);
+ }
+ }
+
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
#if WTF_MIPS_ISA(1)
@@ -1628,6 +1781,65 @@ public:
#endif
}
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+#if WTF_MIPS_ISA(1)
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lwc1 dest, address.offset(addrTemp)
+ lwc1 dest+1, (address.offset+4)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lwc1(dest, addrTempRegister, address.offset);
+ m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, address.offset + 4);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ lwc1 dest, (address.offset & 0xffff)(at)
+ lwc1 dest+4, (address.offset & 0xffff + 4)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.lwc1(dest, addrTempRegister, address.offset);
+ m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, address.offset + 4);
+ }
+#else
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ ldc1 dest, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.ldc1(dest, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ ldc1 dest, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.ldc1(dest, addrTempRegister, address.offset);
+ }
+#endif
+ }
+
void loadDouble(const void* address, FPRegisterID dest)
{
#if WTF_MIPS_ISA(1)
@@ -1649,6 +1861,33 @@ public:
#endif
}
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ swc1 src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.swc1(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ swc1 src, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.swc1(src, addrTempRegister, address.offset);
+ }
+ }
void storeDouble(FPRegisterID src, ImplicitAddress address)
{
@@ -1680,17 +1919,99 @@ public:
#endif
}
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+#if WTF_MIPS_ISA(1)
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ swc1 src, address.offset(addrTemp)
+ swc1 src+1, (address.offset + 4)(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.swc1(src, addrTempRegister, address.offset);
+ m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, address.offset + 4);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ swc1 src, (address.offset & 0xffff)(at)
+ swc1 src+1, (address.offset & 0xffff + 4)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.swc1(src, addrTempRegister, address.offset);
+ m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, address.offset + 4);
+ }
+#else
+ if (address.offset >= -32768 && address.offset <= 32767
+ && !m_fixedWidth) {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ sdc1 src, address.offset(addrTemp)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.sdc1(src, addrTempRegister, address.offset);
+ } else {
+ /*
+ sll addrTemp, address.index, address.scale
+ addu addrTemp, addrTemp, address.base
+ lui immTemp, (address.offset + 0x8000) >> 16
+ addu addrTemp, addrTemp, immTemp
+ sdc1 src, (address.offset & 0xffff)(at)
+ */
+ m_assembler.sll(addrTempRegister, address.index, address.scale);
+ m_assembler.addu(addrTempRegister, addrTempRegister, address.base);
+ m_assembler.lui(immTempRegister, (address.offset + 0x8000) >> 16);
+ m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister);
+ m_assembler.sdc1(src, addrTempRegister, address.offset);
+ }
+#endif
+ }
+
+ void storeDouble(FPRegisterID src, const void* address)
+ {
+#if WTF_MIPS_ISA(1)
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.swc1(src, addrTempRegister, 0);
+ m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4);
+#else
+ move(TrustedImmPtr(address), addrTempRegister);
+ m_assembler.sdc1(src, addrTempRegister, 0);
+#endif
+ }
+
void addDouble(FPRegisterID src, FPRegisterID dest)
{
m_assembler.addd(dest, dest, src);
}
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.addd(dest, op1, op2);
+ }
+
void addDouble(Address src, FPRegisterID dest)
{
loadDouble(src, fpTempRegister);
m_assembler.addd(dest, dest, fpTempRegister);
}
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(address.m_ptr, fpTempRegister);
+ m_assembler.addd(dest, dest, fpTempRegister);
+ }
+
void subDouble(FPRegisterID src, FPRegisterID dest)
{
m_assembler.subd(dest, dest, src);
@@ -1738,6 +2059,16 @@ public:
m_assembler.cvtdw(dest, fpTempRegister);
}
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.cvtds(dst, src);
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.cvtsd(dst, src);
+ }
+
void insertRelaxationWords()
{
/* We need four words for relaxation. */
diff --git a/masm/assembler/MacroAssemblerX86.h b/masm/assembler/MacroAssemblerX86.h
index d1a4ff3c4f..8fd31466d4 100644
--- a/masm/assembler/MacroAssemblerX86.h
+++ b/masm/assembler/MacroAssemblerX86.h
@@ -44,6 +44,7 @@ public:
using MacroAssemblerX86Common::or32;
using MacroAssemblerX86Common::load32;
using MacroAssemblerX86Common::store32;
+ using MacroAssemblerX86Common::store8;
using MacroAssemblerX86Common::branch32;
using MacroAssemblerX86Common::call;
using MacroAssemblerX86Common::jump;
@@ -84,6 +85,11 @@ public:
m_assembler.orl_im(imm.m_value, address.m_ptr);
}
+ void or32(RegisterID reg, AbsoluteAddress address)
+ {
+ m_assembler.orl_rm(reg, address.m_ptr);
+ }
+
void sub32(TrustedImm32 imm, AbsoluteAddress address)
{
m_assembler.subl_im(imm.m_value, address.m_ptr);
@@ -127,6 +133,28 @@ public:
m_assembler.movl_rm(src, address);
}
+ void store8(TrustedImm32 imm, void* address)
+ {
+ ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+ m_assembler.movb_i8m(imm.m_value, address);
+ }
+
+ // Possibly clobbers src.
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ movePackedToInt32(src, dest1);
+ rshiftPacked(TrustedImm32(32), src);
+ movePackedToInt32(src, dest2);
+ }
+
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ moveInt32ToPacked(src1, dest);
+ moveInt32ToPacked(src2, scratch);
+ lshiftPacked(TrustedImm32(32), scratch);
+ orPacked(scratch, dest);
+ }
+
Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
{
m_assembler.addl_im(imm.m_value, dest.m_ptr);
diff --git a/masm/assembler/MacroAssemblerX86Common.h b/masm/assembler/MacroAssemblerX86Common.h
index 905c094267..66db26acb0 100644
--- a/masm/assembler/MacroAssemblerX86Common.h
+++ b/masm/assembler/MacroAssemblerX86Common.h
@@ -97,9 +97,12 @@ public:
#if ENABLE(JIT_CONSTANT_BLINDING)
static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
#if CPU(X86_64)
+ static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
+#if OS(DARWIN) // On 64-bit systems other than DARWIN uint64_t and uintptr_t are the same type so overload is prohibited.
static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
#endif
#endif
+#endif
// Integer arithmetic operations:
//
@@ -993,6 +996,11 @@ public:
m_assembler.movq_i64r(imm.asIntptr(), dest);
}
+ void move(TrustedImm64 imm, RegisterID dest)
+ {
+ m_assembler.movq_i64r(imm.m_value, dest);
+ }
+
void swap(RegisterID reg1, RegisterID reg2)
{
if (reg1 != reg2)
diff --git a/masm/assembler/MacroAssemblerX86_64.h b/masm/assembler/MacroAssemblerX86_64.h
index ac90516f41..ceacf6aa82 100644
--- a/masm/assembler/MacroAssemblerX86_64.h
+++ b/masm/assembler/MacroAssemblerX86_64.h
@@ -45,6 +45,7 @@ public:
using MacroAssemblerX86Common::sub32;
using MacroAssemblerX86Common::load32;
using MacroAssemblerX86Common::store32;
+ using MacroAssemblerX86Common::store8;
using MacroAssemblerX86Common::call;
using MacroAssemblerX86Common::jump;
using MacroAssemblerX86Common::addDouble;
@@ -75,6 +76,12 @@ public:
or32(imm, Address(scratchRegister));
}
+ void or32(RegisterID reg, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ or32(reg, Address(scratchRegister));
+ }
+
void sub32(TrustedImm32 imm, AbsoluteAddress address)
{
move(TrustedImmPtr(address.m_ptr), scratchRegister);
@@ -108,6 +115,12 @@ public:
move(TrustedImmPtr(address), scratchRegister);
store32(imm, scratchRegister);
}
+
+ void store8(TrustedImm32 imm, void* address)
+ {
+ move(TrustedImmPtr(address), scratchRegister);
+ store8(imm, Address(scratchRegister));
+ }
Call call()
{
@@ -141,231 +154,225 @@ public:
return Call::fromTailJump(newJump);
}
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), scratchRegister);
+ add32(src, Address(scratchRegister));
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
- void addPtr(RegisterID src, RegisterID dest)
+ void add64(RegisterID src, RegisterID dest)
{
m_assembler.addq_rr(src, dest);
}
- void addPtr(Address src, RegisterID dest)
+ void add64(Address src, RegisterID dest)
{
m_assembler.addq_mr(src.offset, src.base, dest);
}
- void addPtr(AbsoluteAddress src, RegisterID dest)
+ void add64(AbsoluteAddress src, RegisterID dest)
{
move(TrustedImmPtr(src.m_ptr), scratchRegister);
- addPtr(Address(scratchRegister), dest);
+ add64(Address(scratchRegister), dest);
}
- void addPtr(TrustedImm32 imm, RegisterID srcDest)
+ void add64(TrustedImm32 imm, RegisterID srcDest)
{
m_assembler.addq_ir(imm.m_value, srcDest);
}
- void addPtr(TrustedImmPtr imm, RegisterID dest)
+ void add64(TrustedImm64 imm, RegisterID dest)
{
move(imm, scratchRegister);
- m_assembler.addq_rr(scratchRegister, dest);
+ add64(scratchRegister, dest);
}
- void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
m_assembler.leaq_mr(imm.m_value, src, dest);
}
- void addPtr(TrustedImm32 imm, Address address)
+ void add64(TrustedImm32 imm, Address address)
{
m_assembler.addq_im(imm.m_value, address.offset, address.base);
}
- void addPtr(TrustedImm32 imm, AbsoluteAddress address)
- {
- move(TrustedImmPtr(address.m_ptr), scratchRegister);
- addPtr(imm, Address(scratchRegister));
- }
-
void add64(TrustedImm32 imm, AbsoluteAddress address)
{
- addPtr(imm, address);
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ add64(imm, Address(scratchRegister));
}
- void andPtr(RegisterID src, RegisterID dest)
+ void and64(RegisterID src, RegisterID dest)
{
m_assembler.andq_rr(src, dest);
}
- void andPtr(TrustedImm32 imm, RegisterID srcDest)
+ void and64(TrustedImm32 imm, RegisterID srcDest)
{
m_assembler.andq_ir(imm.m_value, srcDest);
}
- void negPtr(RegisterID dest)
+ void neg64(RegisterID dest)
{
m_assembler.negq_r(dest);
}
- void orPtr(RegisterID src, RegisterID dest)
+ void or64(RegisterID src, RegisterID dest)
{
m_assembler.orq_rr(src, dest);
}
- void orPtr(TrustedImmPtr imm, RegisterID dest)
+ void or64(TrustedImm64 imm, RegisterID dest)
{
move(imm, scratchRegister);
- m_assembler.orq_rr(scratchRegister, dest);
+ or64(scratchRegister, dest);
}
- void orPtr(TrustedImm32 imm, RegisterID dest)
+ void or64(TrustedImm32 imm, RegisterID dest)
{
m_assembler.orq_ir(imm.m_value, dest);
}
- void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
+ void or64(RegisterID op1, RegisterID op2, RegisterID dest)
{
if (op1 == op2)
move(op1, dest);
else if (op1 == dest)
- orPtr(op2, dest);
+ or64(op2, dest);
else {
move(op2, dest);
- orPtr(op1, dest);
+ or64(op1, dest);
}
}
- void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
move(src, dest);
- orPtr(imm, dest);
+ or64(imm, dest);
}
- void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
+ void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
{
m_assembler.rorq_i8r(imm.m_value, srcDst);
}
- void subPtr(RegisterID src, RegisterID dest)
+ void sub64(RegisterID src, RegisterID dest)
{
m_assembler.subq_rr(src, dest);
}
- void subPtr(TrustedImm32 imm, RegisterID dest)
+ void sub64(TrustedImm32 imm, RegisterID dest)
{
m_assembler.subq_ir(imm.m_value, dest);
}
- void subPtr(TrustedImmPtr imm, RegisterID dest)
+ void sub64(TrustedImm64 imm, RegisterID dest)
{
move(imm, scratchRegister);
- m_assembler.subq_rr(scratchRegister, dest);
+ sub64(scratchRegister, dest);
}
- void xorPtr(RegisterID src, RegisterID dest)
+ void xor64(RegisterID src, RegisterID dest)
{
m_assembler.xorq_rr(src, dest);
}
- void xorPtr(RegisterID src, Address dest)
+ void xor64(RegisterID src, Address dest)
{
m_assembler.xorq_rm(src, dest.offset, dest.base);
}
- void xorPtr(TrustedImm32 imm, RegisterID srcDest)
+ void xor64(TrustedImm32 imm, RegisterID srcDest)
{
m_assembler.xorq_ir(imm.m_value, srcDest);
}
- void loadPtr(ImplicitAddress address, RegisterID dest)
- {
- m_assembler.movq_mr(address.offset, address.base, dest);
- }
-
- ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ void load64(ImplicitAddress address, RegisterID dest)
{
- ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
m_assembler.movq_mr(address.offset, address.base, dest);
- return result;
}
- void loadPtr(BaseIndex address, RegisterID dest)
+ void load64(BaseIndex address, RegisterID dest)
{
m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
}
- void loadPtr(const void* address, RegisterID dest)
+ void load64(const void* address, RegisterID dest)
{
if (dest == X86Registers::eax)
m_assembler.movq_mEAX(address);
else {
move(TrustedImmPtr(address), dest);
- loadPtr(dest, dest);
+ load64(dest, dest);
}
}
- DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+ DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
{
padBeforePatch();
m_assembler.movq_mr_disp32(address.offset, address.base, dest);
return DataLabel32(this);
}
- DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
+ DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
{
padBeforePatch();
m_assembler.movq_mr_disp8(address.offset, address.base, dest);
return DataLabelCompact(this);
}
- void storePtr(RegisterID src, ImplicitAddress address)
+ void store64(RegisterID src, ImplicitAddress address)
{
m_assembler.movq_rm(src, address.offset, address.base);
}
- void storePtr(RegisterID src, BaseIndex address)
+ void store64(RegisterID src, BaseIndex address)
{
m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
}
- void storePtr(RegisterID src, void* address)
+ void store64(RegisterID src, void* address)
{
if (src == X86Registers::eax)
m_assembler.movq_EAXm(address);
else {
move(TrustedImmPtr(address), scratchRegister);
- storePtr(src, scratchRegister);
+ store64(src, scratchRegister);
}
}
- void storePtr(TrustedImmPtr imm, ImplicitAddress address)
+ void store64(TrustedImm64 imm, ImplicitAddress address)
{
move(imm, scratchRegister);
- storePtr(scratchRegister, address);
+ store64(scratchRegister, address);
}
- void storePtr(TrustedImmPtr imm, BaseIndex address)
+ void store64(TrustedImm64 imm, BaseIndex address)
{
move(imm, scratchRegister);
m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale);
}
- DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+ DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
{
padBeforePatch();
m_assembler.movq_rm_disp32(src, address.offset, address.base);
return DataLabel32(this);
}
- void movePtrToDouble(RegisterID src, FPRegisterID dest)
+ void move64ToDouble(RegisterID src, FPRegisterID dest)
{
m_assembler.movq_rr(src, dest);
}
- void moveDoubleToPtr(FPRegisterID src, RegisterID dest)
+ void moveDoubleTo64(FPRegisterID src, RegisterID dest)
{
m_assembler.movq_rr(src, dest);
}
- void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
+ void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
{
if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
m_assembler.testq_rr(left, left);
@@ -375,67 +382,60 @@ public:
m_assembler.movzbl_rr(dest, dest);
}
- void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
{
m_assembler.cmpq_rr(right, left);
m_assembler.setCC_r(x86Condition(cond), dest);
m_assembler.movzbl_rr(dest, dest);
}
- Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
- {
- move(TrustedImmPtr(dest.m_ptr), scratchRegister);
- add32(src, Address(scratchRegister));
- return Jump(m_assembler.jCC(x86Condition(cond)));
- }
-
- Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
+ Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
{
m_assembler.cmpq_rr(right, left);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
+ Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
{
if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
m_assembler.testq_rr(left, left);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
move(right, scratchRegister);
- return branchPtr(cond, left, scratchRegister);
+ return branch64(cond, left, scratchRegister);
}
- Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
+ Jump branch64(RelationalCondition cond, RegisterID left, Address right)
{
m_assembler.cmpq_mr(right.offset, right.base, left);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
+ Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
{
move(TrustedImmPtr(left.m_ptr), scratchRegister);
- return branchPtr(cond, Address(scratchRegister), right);
+ return branch64(cond, Address(scratchRegister), right);
}
- Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
+ Jump branch64(RelationalCondition cond, Address left, RegisterID right)
{
m_assembler.cmpq_rm(right, left.offset, left.base);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
+ Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
{
move(right, scratchRegister);
- return branchPtr(cond, left, scratchRegister);
+ return branch64(cond, left, scratchRegister);
}
- Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
+ Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
{
m_assembler.testq_rr(reg, mask);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
{
// if we are only interested in the low seven bits, this can be tested with a testb
if (mask.m_value == -1)
@@ -447,7 +447,7 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+ void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
{
if (mask.m_value == -1)
m_assembler.testq_rr(reg, reg);
@@ -458,19 +458,19 @@ public:
set32(x86Condition(cond), dest);
}
- void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+ void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
{
m_assembler.testq_rr(reg, mask);
set32(x86Condition(cond), dest);
}
- Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
{
- loadPtr(address.m_ptr, scratchRegister);
- return branchTestPtr(cond, scratchRegister, mask);
+ load64(address.m_ptr, scratchRegister);
+ return branchTest64(cond, scratchRegister, mask);
}
- Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
+ Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
{
if (mask.m_value == -1)
m_assembler.cmpq_im(0, address.offset, address.base);
@@ -479,13 +479,13 @@ public:
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
+ Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
{
m_assembler.testq_rm(reg, address.offset, address.base);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
{
if (mask.m_value == -1)
m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
@@ -495,28 +495,41 @@ public:
}
- Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
{
- addPtr(imm, dest);
+ add64(imm, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
+ Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
{
- addPtr(src, dest);
+ add64(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
{
- subPtr(imm, dest);
+ sub64(imm, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
- Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+ Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ sub64(src, dest);
+ return Jump(m_assembler.jCC(x86Condition(cond)));
+ }
+
+ Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
{
move(src1, dest);
- return branchSubPtr(cond, src2, dest);
+ return branchSub64(cond, src2, dest);
+ }
+
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
+ m_assembler.movq_mr(address.offset, address.base, dest);
+ return result;
}
DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
@@ -529,22 +542,22 @@ public:
Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
dataLabel = moveWithPatch(initialRightValue, scratchRegister);
- return branchPtr(cond, left, scratchRegister);
+ return branch64(cond, left, scratchRegister);
}
Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
dataLabel = moveWithPatch(initialRightValue, scratchRegister);
- return branchPtr(cond, left, scratchRegister);
+ return branch64(cond, left, scratchRegister);
}
DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
{
DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
- storePtr(scratchRegister, address);
+ store64(scratchRegister, address);
return label;
}
-
+
using MacroAssemblerX86Common::branchTest8;
Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
{
diff --git a/masm/assembler/X86Assembler.h b/masm/assembler/X86Assembler.h
index adaee4bc07..ecb178e88b 100644
--- a/masm/assembler/X86Assembler.h
+++ b/masm/assembler/X86Assembler.h
@@ -541,6 +541,11 @@ public:
m_formatter.immediate32(imm);
}
}
+
+ void orl_rm(RegisterID src, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, addr);
+ }
#endif
void subl_rr(RegisterID src, RegisterID dst)
@@ -1150,6 +1155,15 @@ public:
m_formatter.immediate32(imm);
}
+#if !CPU(X86_64)
+ void movb_i8m(int imm, const void* addr)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
+
void movb_i8m(int imm, int offset, RegisterID base)
{
ASSERT(-128 <= imm && imm < 128);
diff --git a/masm/masm.pri b/masm/masm.pri
index 119b70f53b..117707fc70 100644
--- a/masm/masm.pri
+++ b/masm/masm.pri
@@ -13,7 +13,8 @@ DEFINES += WTF_EXPORT_PRIVATE=""
DEFINES += ENABLE_LLINT=0
DEFINES += ENABLE_DFG_JIT=0
-DEFINES += ENABLE_JIT=0
+DEFINES += ENABLE_JIT=1
+DEFINES += ENABLE_JIT_CONSTANT_BLINDING=0
DEFINES += ENABLE_ASSEMBLER=1
DEFINES += USE_SYSTEM_MALLOC=1
diff --git a/masm/wtf/Assertions.h b/masm/wtf/Assertions.h
index 7e079ab187..f347a21747 100644
--- a/masm/wtf/Assertions.h
+++ b/masm/wtf/Assertions.h
@@ -42,8 +42,6 @@
http://msdn2.microsoft.com/en-us/library/ms177415(VS.80).aspx
*/
-#include <wtf/Platform.h>
-
#include <stddef.h>
#if !COMPILER(MSVC)
diff --git a/masm/wtf/Compiler.h b/masm/wtf/Compiler.h
index f40e15e601..a9ef419c18 100644
--- a/masm/wtf/Compiler.h
+++ b/masm/wtf/Compiler.h
@@ -57,10 +57,10 @@
#define WTF_COMPILER_SUPPORTS_CXX_DELETED_FUNCTIONS __has_extension(cxx_deleted_functions)
#define WTF_COMPILER_SUPPORTS_CXX_NULLPTR __has_feature(cxx_nullptr)
+#define WTF_COMPILER_SUPPORTS_CXX_EXPLICIT_CONVERSIONS __has_feature(cxx_explicit_conversions)
#define WTF_COMPILER_SUPPORTS_BLOCKS __has_feature(blocks)
#define WTF_COMPILER_SUPPORTS_C_STATIC_ASSERT __has_extension(c_static_assert)
#define WTF_COMPILER_SUPPORTS_CXX_OVERRIDE_CONTROL __has_extension(cxx_override_control)
-
#define WTF_COMPILER_SUPPORTS_HAS_TRIVIAL_DESTRUCTOR __has_extension(has_trivial_destructor)
#endif
@@ -122,7 +122,7 @@
/* Specific compiler features */
#if COMPILER(GCC) && !COMPILER(CLANG)
-#if GCC_VERSION_AT_LEAST(4, 7, 0) && __cplusplus >= 201103L
+#if GCC_VERSION_AT_LEAST(4, 7, 0) && defined(__cplusplus) && __cplusplus >= 201103L
#define WTF_COMPILER_SUPPORTS_CXX_RVALUE_REFERENCES 1
#define WTF_COMPILER_SUPPORTS_CXX_DELETED_FUNCTIONS 1
#define WTF_COMPILER_SUPPORTS_CXX_NULLPTR 1
diff --git a/masm/wtf/Noncopyable.h b/masm/wtf/Noncopyable.h
index a88cefc34e..f6bdfbb40b 100644
--- a/masm/wtf/Noncopyable.h
+++ b/masm/wtf/Noncopyable.h
@@ -25,13 +25,9 @@
#if COMPILER_SUPPORTS(CXX_DELETED_FUNCTIONS)
#define WTF_MAKE_NONCOPYABLE(ClassName) \
- CLANG_PRAGMA("clang diagnostic push") \
- CLANG_PRAGMA("clang diagnostic ignored \"-Wunknown-pragmas\"") \
- CLANG_PRAGMA("clang diagnostic ignored \"-Wc++0x-extensions\"") \
private: \
ClassName(const ClassName&) = delete; \
- ClassName& operator=(const ClassName&) = delete; \
- CLANG_PRAGMA("clang diagnostic pop")
+ ClassName& operator=(const ClassName&) = delete;
#else
#define WTF_MAKE_NONCOPYABLE(ClassName) \
private: \
diff --git a/masm/wtf/Platform.h b/masm/wtf/Platform.h
index 0be4bb07dc..3c263c51d9 100644
--- a/masm/wtf/Platform.h
+++ b/masm/wtf/Platform.h
@@ -540,9 +540,6 @@
#define WTF_USE_SCROLLBAR_PAINTER 1
#define HAVE_XPC 1
#endif
-#if !defined(ENABLE_JAVA_BRIDGE)
-#define ENABLE_JAVA_BRIDGE 1
-#endif
#if !defined(ENABLE_DASHBOARD_SUPPORT)
#define ENABLE_DASHBOARD_SUPPORT 1
#endif
@@ -591,7 +588,6 @@
#define ENABLE_GEOLOCATION 1
#define ENABLE_ICONDATABASE 0
#define ENABLE_INSPECTOR 1
-#define ENABLE_JAVA_BRIDGE 0
#define ENABLE_NETSCAPE_PLUGIN_API 0
#define ENABLE_ORIENTATION_EVENTS 1
#define ENABLE_REPAINT_THROTTLING 1
@@ -604,11 +600,9 @@
#define WTF_USE_PTHREADS 1
#if PLATFORM(IOS_SIMULATOR)
- #define ENABLE_CLASSIC_INTERPRETER 1
#define ENABLE_JIT 0
#define ENABLE_YARR_JIT 0
#else
- #define ENABLE_CLASSIC_INTERPRETER 0
#define ENABLE_JIT 1
#define ENABLE_LLINT 1
#define ENABLE_YARR_JIT 1
@@ -641,7 +635,9 @@
#if PLATFORM(WX)
#if !CPU(PPC)
+#if !defined(ENABLE_ASSEMBLER)
#define ENABLE_ASSEMBLER 1
+#endif
#define ENABLE_JIT 1
#endif
#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
@@ -698,7 +694,6 @@
#define HAVE_LANGINFO_H 1
#define HAVE_MMAP 1
#define HAVE_MERGESORT 1
-#define HAVE_SBRK 1
#define HAVE_STRINGS_H 1
#define HAVE_SYS_PARAM_H 1
#define HAVE_SYS_TIME_H 1
@@ -739,7 +734,6 @@
#define HAVE_MMAP 1
#define HAVE_MADV_FREE_REUSE 1
#define HAVE_MADV_FREE 1
-#define HAVE_SBRK 1
#define HAVE_STRINGS_H 1
#define HAVE_SYS_PARAM_H 1
#define HAVE_SYS_TIME_H 1
@@ -749,7 +743,6 @@
#define HAVE_ERRNO_H 1
#define HAVE_NMAP 1
-#define HAVE_SBRK 1
#define HAVE_STRINGS_H 1
#define HAVE_SYS_PARAM_H 1
#define HAVE_SYS_TIME_H 1
@@ -761,7 +754,6 @@
#define HAVE_ERRNO_H 1
#define HAVE_LANGINFO_H 1
#define HAVE_MMAP 1
-#define HAVE_SBRK 1
#define HAVE_STRINGS_H 1
#define HAVE_SYS_PARAM_H 1
#define HAVE_SYS_TIME_H 1
@@ -880,6 +872,7 @@
/* JIT is not implemented for Windows 64-bit */
#if !defined(ENABLE_JIT) && OS(WINDOWS) && CPU(X86_64)
#define ENABLE_JIT 0
+#define ENABLE_YARR_JIT 0
#endif
#if !defined(ENABLE_JIT) && CPU(SH4) && PLATFORM(QT)
@@ -906,17 +899,25 @@
#define ENABLE_DISASSEMBLER 1
#endif
+/* On the GTK+ port we take an extra precaution for LLINT support:
+ * We disable it on x86 builds if the build target doesn't support SSE2
+ * instructions (LLINT requires SSE2 on this platform). */
+#if !defined(ENABLE_LLINT) && PLATFORM(GTK) && CPU(X86) && COMPILER(GCC) \
+ && !defined(__SSE2__)
+#define ENABLE_LLINT 0
+#endif
+
/* On some of the platforms where we have a JIT, we want to also have the
low-level interpreter. */
#if !defined(ENABLE_LLINT) \
&& ENABLE(JIT) \
&& (OS(DARWIN) || OS(LINUX)) \
- && (PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(GTK)) \
+ && (PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(GTK) || (PLATFORM(QT) && OS(LINUX))) \
&& (CPU(X86) || CPU(X86_64) || CPU(ARM_THUMB2))
#define ENABLE_LLINT 1
#endif
-#if !defined(ENABLE_DFG_JIT) && ENABLE(JIT)
+#if !defined(ENABLE_DFG_JIT) && ENABLE(JIT) && !COMPILER(MSVC)
/* Enable the DFG JIT on X86 and X86_64. Only tested on Mac and GNU/Linux. */
#if (CPU(X86) || CPU(X86_64)) && (PLATFORM(MAC) || OS(LINUX))
#define ENABLE_DFG_JIT 1
@@ -931,6 +932,22 @@
#endif
#endif
+/* If the jit is not available, enable the LLInt C Loop: */
+#if !ENABLE(JIT)
+#undef ENABLE_LLINT /* Undef so that we can redefine it. */
+#undef ENABLE_LLINT_C_LOOP /* Undef so that we can redefine it. */
+#undef ENABLE_DFG_JIT /* Undef so that we can redefine it. */
+#define ENABLE_LLINT 1
+#define ENABLE_LLINT_C_LOOP 1
+#define ENABLE_DFG_JIT 0
+#endif
+
+/* Do a sanity check to make sure that we at least have one execution engine in
+ use: */
+#if !(ENABLE(JIT) || ENABLE(LLINT))
+#error You have to have at least one execution model enabled to build JSC
+#endif
+
/* Profiling of types and values used by JIT code. DFG_JIT depends on it, but you
can enable it manually with DFG turned off if you want to use it as a standalone
profiler. In that case, you probably want to also enable VERBOSE_VALUE_PROFILE
@@ -953,29 +970,6 @@
#define ENABLE_WRITE_BARRIER_PROFILING 0
#endif
-/* Ensure that either the JIT or the interpreter has been enabled. */
-#if !defined(ENABLE_CLASSIC_INTERPRETER) && !ENABLE(JIT) && !ENABLE(LLINT)
-#define ENABLE_CLASSIC_INTERPRETER 1
-#endif
-
-/* If the jit and classic interpreter is not available, enable the LLInt C Loop: */
-#if !ENABLE(JIT) && !ENABLE(CLASSIC_INTERPRETER)
- #define ENABLE_LLINT 1
- #define ENABLE_LLINT_C_LOOP 1
- #define ENABLE_DFG_JIT 0
-#endif
-
-/* Do a sanity check to make sure that we at least have one execution engine in
- use: */
-#if !(ENABLE(JIT) || ENABLE(CLASSIC_INTERPRETER) || ENABLE(LLINT))
-#error You have to have at least one execution model enabled to build JSC
-#endif
-/* Do a sanity check to make sure that we don't have both the classic interpreter
- and the llint C loop in use at the same time: */
-#if ENABLE(CLASSIC_INTERPRETER) && ENABLE(LLINT_C_LOOP)
-#error You cannot build both the classic interpreter and the llint C loop together
-#endif
-
/* Configure the JIT */
#if CPU(X86) && COMPILER(MSVC)
#define JSC_HOST_CALL __fastcall
@@ -989,12 +983,9 @@
#if COMPILER(GCC) || (RVCT_VERSION_AT_LEAST(4, 0, 0, 0) && defined(__GNUC__))
#define HAVE_COMPUTED_GOTO 1
#endif
-#if HAVE(COMPUTED_GOTO) && ENABLE(CLASSIC_INTERPRETER)
-#define ENABLE_COMPUTED_GOTO_CLASSIC_INTERPRETER 1
-#endif
/* Determine if we need to enable Computed Goto Opcodes or not: */
-#if (HAVE(COMPUTED_GOTO) && ENABLE(LLINT)) || ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER)
+#if HAVE(COMPUTED_GOTO) && ENABLE(LLINT)
#define ENABLE_COMPUTED_GOTO_OPCODES 1
#endif
@@ -1009,9 +1000,16 @@
#define ENABLE_YARR_JIT_DEBUG 0
#endif
+/* If either the JIT or the RegExp JIT is enabled, then the Assembler must be
+ enabled as well: */
#if ENABLE(JIT) || ENABLE(YARR_JIT)
+#if defined(ENABLE_ASSEMBLER) && !ENABLE_ASSEMBLER
+#error "Cannot enable the JIT or RegExp JIT without enabling the Assembler"
+#else
+#undef ENABLE_ASSEMBLER
#define ENABLE_ASSEMBLER 1
#endif
+#endif
/* Pick which allocator to use; we only need an executable allocator if the assembler is compiled in.
On x86-64 we use a single fixed mmap, on other platforms we mmap on demand. */
@@ -1027,6 +1025,11 @@
#define ENABLE_PAN_SCROLLING 1
#endif
+/*Add other platforms as they update their platfrom specific code to handle TextRun's with 8 bit data. */
+#if PLATFORM(MAC)
+#define ENABLE_8BIT_TEXTRUN 1
+#endif
+
/* Use the QXmlStreamReader implementation for XMLDocumentParser */
/* Use the QXmlQuery implementation for XSLTProcessor */
#if PLATFORM(QT)
@@ -1111,7 +1114,7 @@
since most ports try to support sub-project independence, adding new headers
to WTF causes many ports to break, and so this way we can address the build
breakages one port at a time. */
-#if !defined(WTF_USE_EXPORT_MACROS) && (PLATFORM(MAC) || PLATFORM(QT) || PLATFORM(WX) || PLATFORM(BLACKBERRY))
+#if !defined(WTF_USE_EXPORT_MACROS) && (PLATFORM(MAC) || PLATFORM(QT) || PLATFORM(WX))
#define WTF_USE_EXPORT_MACROS 1
#endif
@@ -1181,10 +1184,6 @@
#define WTF_USE_ZLIB 1
#endif
-#if PLATFORM(GTK)
-#define WTF_DEPRECATED_STRING_OPERATORS
-#endif
-
#if PLATFORM(QT)
#include <qglobal.h>
#if defined(QT_OPENGL_ES_2) && !defined(WTF_USE_OPENGL_ES_2)
diff --git a/masm/wtf/RefCounted.h b/masm/wtf/RefCounted.h
index cea1434e1b..0504b9ed28 100644
--- a/masm/wtf/RefCounted.h
+++ b/masm/wtf/RefCounted.h
@@ -30,6 +30,12 @@
namespace WTF {
+#ifdef NDEBUG
+#define CHECK_REF_COUNTED_LIFECYCLE 0
+#else
+#define CHECK_REF_COUNTED_LIFECYCLE 1
+#endif
+
// This base class holds the non-template methods and attributes.
// The RefCounted class inherits from it reducing the template bloat
// generated by the compiler (technique called template hoisting).
@@ -37,7 +43,7 @@ class RefCountedBase {
public:
void ref()
{
-#ifndef NDEBUG
+#if CHECK_REF_COUNTED_LIFECYCLE
// Start thread verification as soon as the ref count gets to 2. This
// heuristic reflects the fact that items are often created on one thread
// and then given to another thread to be used.
@@ -46,26 +52,30 @@ public:
// We should be able to add a "detachFromThread" method to make this explicit.
if (m_refCount == 1)
m_verifier.setShared(true);
-#endif
// If this assert fires, it either indicates a thread safety issue or
// that the verification needs to change. See ThreadRestrictionVerifier for
// the different modes.
ASSERT(m_verifier.isSafeToUse());
ASSERT(!m_deletionHasBegun);
ASSERT(!m_adoptionIsRequired);
+#endif
++m_refCount;
}
bool hasOneRef() const
{
+#if CHECK_REF_COUNTED_LIFECYCLE
ASSERT(m_verifier.isSafeToUse());
ASSERT(!m_deletionHasBegun);
+#endif
return m_refCount == 1;
}
int refCount() const
{
+#if CHECK_REF_COUNTED_LIFECYCLE
ASSERT(m_verifier.isSafeToUse());
+#endif
return m_refCount;
}
@@ -87,14 +97,14 @@ public:
// safe version of reference counting.
void turnOffVerifier()
{
-#ifndef NDEBUG
+#if CHECK_REF_COUNTED_LIFECYCLE
m_verifier.turnOffVerification();
#endif
}
void relaxAdoptionRequirement()
{
-#ifndef NDEBUG
+#if CHECK_REF_COUNTED_LIFECYCLE
ASSERT(!m_deletionHasBegun);
ASSERT(m_adoptionIsRequired);
m_adoptionIsRequired = false;
@@ -110,7 +120,7 @@ public:
protected:
RefCountedBase()
: m_refCount(1)
-#ifndef NDEBUG
+#if CHECK_REF_COUNTED_LIFECYCLE
, m_deletionHasBegun(false)
, m_adoptionIsRequired(true)
#endif
@@ -119,27 +129,31 @@ protected:
~RefCountedBase()
{
+#if CHECK_REF_COUNTED_LIFECYCLE
ASSERT(m_deletionHasBegun);
ASSERT(!m_adoptionIsRequired);
+#endif
}
// Returns whether the pointer should be freed or not.
bool derefBase()
{
+#if CHECK_REF_COUNTED_LIFECYCLE
ASSERT(m_verifier.isSafeToUse());
ASSERT(!m_deletionHasBegun);
ASSERT(!m_adoptionIsRequired);
+#endif
ASSERT(m_refCount > 0);
if (m_refCount == 1) {
-#ifndef NDEBUG
+#if CHECK_REF_COUNTED_LIFECYCLE
m_deletionHasBegun = true;
#endif
return true;
}
--m_refCount;
-#ifndef NDEBUG
+#if CHECK_REF_COUNTED_LIFECYCLE
// Stop thread verification when the ref goes to 1 because it
// is safe to be passed to another thread at this point.
if (m_refCount == 1)
@@ -148,7 +162,7 @@ protected:
return false;
}
-#ifndef NDEBUG
+#if CHECK_REF_COUNTED_LIFECYCLE
bool deletionHasBegun() const
{
return m_deletionHasBegun;
@@ -157,20 +171,19 @@ protected:
private:
-#ifndef NDEBUG
+#if CHECK_REF_COUNTED_LIFECYCLE
friend void adopted(RefCountedBase*);
#endif
int m_refCount;
-#ifndef NDEBUG
+#if CHECK_REF_COUNTED_LIFECYCLE
bool m_deletionHasBegun;
bool m_adoptionIsRequired;
ThreadRestrictionVerifier m_verifier;
#endif
};
-#ifndef NDEBUG
-
+#if CHECK_REF_COUNTED_LIFECYCLE
inline void adopted(RefCountedBase* object)
{
if (!object)
@@ -178,7 +191,6 @@ inline void adopted(RefCountedBase* object)
ASSERT(!object->m_deletionHasBegun);
object->m_adoptionIsRequired = false;
}
-
#endif
template<typename T> class RefCounted : public RefCountedBase {
@@ -213,24 +225,24 @@ protected:
}
};
-#ifdef NDEBUG
-inline void RefCountedBase::setMutexForVerifier(Mutex&) { }
-#else
+#if CHECK_REF_COUNTED_LIFECYCLE
inline void RefCountedBase::setMutexForVerifier(Mutex& mutex)
{
m_verifier.setMutexMode(mutex);
}
+#else
+inline void RefCountedBase::setMutexForVerifier(Mutex&) { }
#endif
#if HAVE(DISPATCH_H)
-#ifdef NDEBUG
-inline void RefCountedBase::setDispatchQueueForVerifier(dispatch_queue_t) { }
-#else
+#if CHECK_REF_COUNTED_LIFECYCLE
inline void RefCountedBase::setDispatchQueueForVerifier(dispatch_queue_t queue)
{
m_verifier.setDispatchQueueMode(queue);
}
-#endif // NDEBUG
+#else
+inline void RefCountedBase::setDispatchQueueForVerifier(dispatch_queue_t) { }
+#endif
#endif // HAVE(DISPATCH_H)
} // namespace WTF
diff --git a/masm/wtf/TypeTraits.h b/masm/wtf/TypeTraits.h
index 097b2fd790..b9e46bc555 100644
--- a/masm/wtf/TypeTraits.h
+++ b/masm/wtf/TypeTraits.h
@@ -228,208 +228,34 @@ namespace WTF {
>::Type Type;
};
-#if (defined(__GLIBCXX__) && (__GLIBCXX__ >= 20070724) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || (defined(_MSC_VER) && (_MSC_VER >= 1600))
-
+#if COMPILER(CLANG) || GCC_VERSION_AT_LEAST(4, 6, 0) || (defined(_MSC_VER) && (_MSC_VER >= 1400) && (_MSC_VER < 1600) && !defined(__INTEL_COMPILER))
+ // VC8 (VS2005) and later has __has_trivial_constructor and __has_trivial_destructor,
+ // but the implementation returns false for built-in types. We add the extra IsPod condition to
+ // work around this.
+ template <typename T> struct HasTrivialConstructor {
+ static const bool value = __has_trivial_constructor(T) || IsPod<RemoveConstVolatile<T> >::value;
+ };
+ template <typename T> struct HasTrivialDestructor {
+ static const bool value = __has_trivial_destructor(T) || IsPod<RemoveConstVolatile<T> >::value;
+ };
+#elif (defined(__GLIBCXX__) && (__GLIBCXX__ >= 20070724) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || (defined(_MSC_VER) && (_MSC_VER >= 1600))
// GCC's libstdc++ 20070724 and later supports C++ TR1 type_traits in the std namespace.
// VC10 (VS2010) and later support C++ TR1 type_traits in the std::tr1 namespace.
template<typename T> struct HasTrivialConstructor : public std::tr1::has_trivial_constructor<T> { };
template<typename T> struct HasTrivialDestructor : public std::tr1::has_trivial_destructor<T> { };
-
#else
-
- // This compiler doesn't provide type traits, so we provide basic HasTrivialConstructor
- // and HasTrivialDestructor definitions. The definitions here include most built-in
- // scalar types but do not include POD structs and classes. For the intended purposes of
- // type_traits this results correct but potentially less efficient code.
- template <typename T, T v>
- struct IntegralConstant {
- static const T value = v;
- typedef T value_type;
- typedef IntegralConstant<T, v> type;
+ // For compilers that don't support detection of trivial constructors and destructors in classes,
+ // we use a template that returns true for any POD type that IsPod can detect (see IsPod caveats above),
+ // but false for all other types (which includes all classes). This will give false negatives, which can hurt
+ // performance, but avoids false positives, which would result in incorrect behavior.
+ template <typename T> struct HasTrivialConstructor {
+ static const bool value = IsPod<RemoveConstVolatile<T> >::value;
+ };
+ template <typename T> struct HasTrivialDestructor {
+ static const bool value = IsPod<RemoveConstVolatile<T> >::value;
};
-
- typedef IntegralConstant<bool, true> true_type;
- typedef IntegralConstant<bool, false> false_type;
-
-#if COMPILER(CLANG) || (defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(__INTEL_COMPILER))
- // VC8 (VS2005) and later have built-in compiler support for HasTrivialConstructor / HasTrivialDestructor,
- // but for some unexplained reason it doesn't work on built-in types.
- template <typename T> struct HasTrivialConstructor : public IntegralConstant<bool, __has_trivial_constructor(T)>{ };
- template <typename T> struct HasTrivialDestructor : public IntegralConstant<bool, __has_trivial_destructor(T)>{ };
-#else
- template <typename T> struct HasTrivialConstructor : public false_type{ };
- template <typename T> struct HasTrivialDestructor : public false_type{ };
#endif
- template <typename T> struct HasTrivialConstructor<T*> : public true_type{ };
- template <typename T> struct HasTrivialDestructor<T*> : public true_type{ };
-
- template <> struct HasTrivialConstructor<float> : public true_type{ };
- template <> struct HasTrivialConstructor<const float> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile float> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile float> : public true_type{ };
-
- template <> struct HasTrivialConstructor<double> : public true_type{ };
- template <> struct HasTrivialConstructor<const double> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile double> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile double> : public true_type{ };
-
- template <> struct HasTrivialConstructor<long double> : public true_type{ };
- template <> struct HasTrivialConstructor<const long double> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile long double> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile long double> : public true_type{ };
-
- template <> struct HasTrivialConstructor<unsigned char> : public true_type{ };
- template <> struct HasTrivialConstructor<const unsigned char> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile unsigned char> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile unsigned char> : public true_type{ };
-
- template <> struct HasTrivialConstructor<unsigned short> : public true_type{ };
- template <> struct HasTrivialConstructor<const unsigned short> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile unsigned short> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile unsigned short> : public true_type{ };
-
- template <> struct HasTrivialConstructor<unsigned int> : public true_type{ };
- template <> struct HasTrivialConstructor<const unsigned int> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile unsigned int> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile unsigned int> : public true_type{ };
-
- template <> struct HasTrivialConstructor<unsigned long> : public true_type{ };
- template <> struct HasTrivialConstructor<const unsigned long> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile unsigned long> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile unsigned long> : public true_type{ };
-
- template <> struct HasTrivialConstructor<unsigned long long> : public true_type{ };
- template <> struct HasTrivialConstructor<const unsigned long long> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile unsigned long long> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile unsigned long long> : public true_type{ };
-
- template <> struct HasTrivialConstructor<signed char> : public true_type{ };
- template <> struct HasTrivialConstructor<const signed char> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile signed char> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile signed char> : public true_type{ };
-
- template <> struct HasTrivialConstructor<signed short> : public true_type{ };
- template <> struct HasTrivialConstructor<const signed short> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile signed short> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile signed short> : public true_type{ };
-
- template <> struct HasTrivialConstructor<signed int> : public true_type{ };
- template <> struct HasTrivialConstructor<const signed int> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile signed int> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile signed int> : public true_type{ };
-
- template <> struct HasTrivialConstructor<signed long> : public true_type{ };
- template <> struct HasTrivialConstructor<const signed long> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile signed long> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile signed long> : public true_type{ };
-
- template <> struct HasTrivialConstructor<signed long long> : public true_type{ };
- template <> struct HasTrivialConstructor<const signed long long> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile signed long long> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile signed long long> : public true_type{ };
-
- template <> struct HasTrivialConstructor<bool> : public true_type{ };
- template <> struct HasTrivialConstructor<const bool> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile bool> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile bool> : public true_type{ };
-
- template <> struct HasTrivialConstructor<char> : public true_type{ };
- template <> struct HasTrivialConstructor<const char> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile char> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile char> : public true_type{ };
-
- #if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
- template <> struct HasTrivialConstructor<wchar_t> : public true_type{ };
- template <> struct HasTrivialConstructor<const wchar_t> : public true_type{ };
- template <> struct HasTrivialConstructor<volatile wchar_t> : public true_type{ };
- template <> struct HasTrivialConstructor<const volatile wchar_t> : public true_type{ };
- #endif
-
- template <> struct HasTrivialDestructor<float> : public true_type{ };
- template <> struct HasTrivialDestructor<const float> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile float> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile float> : public true_type{ };
-
- template <> struct HasTrivialDestructor<double> : public true_type{ };
- template <> struct HasTrivialDestructor<const double> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile double> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile double> : public true_type{ };
-
- template <> struct HasTrivialDestructor<long double> : public true_type{ };
- template <> struct HasTrivialDestructor<const long double> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile long double> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile long double> : public true_type{ };
-
- template <> struct HasTrivialDestructor<unsigned char> : public true_type{ };
- template <> struct HasTrivialDestructor<const unsigned char> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile unsigned char> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile unsigned char> : public true_type{ };
-
- template <> struct HasTrivialDestructor<unsigned short> : public true_type{ };
- template <> struct HasTrivialDestructor<const unsigned short> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile unsigned short> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile unsigned short> : public true_type{ };
-
- template <> struct HasTrivialDestructor<unsigned int> : public true_type{ };
- template <> struct HasTrivialDestructor<const unsigned int> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile unsigned int> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile unsigned int> : public true_type{ };
-
- template <> struct HasTrivialDestructor<unsigned long> : public true_type{ };
- template <> struct HasTrivialDestructor<const unsigned long> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile unsigned long> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile unsigned long> : public true_type{ };
-
- template <> struct HasTrivialDestructor<unsigned long long> : public true_type{ };
- template <> struct HasTrivialDestructor<const unsigned long long> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile unsigned long long> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile unsigned long long> : public true_type{ };
-
- template <> struct HasTrivialDestructor<signed char> : public true_type{ };
- template <> struct HasTrivialDestructor<const signed char> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile signed char> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile signed char> : public true_type{ };
-
- template <> struct HasTrivialDestructor<signed short> : public true_type{ };
- template <> struct HasTrivialDestructor<const signed short> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile signed short> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile signed short> : public true_type{ };
-
- template <> struct HasTrivialDestructor<signed int> : public true_type{ };
- template <> struct HasTrivialDestructor<const signed int> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile signed int> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile signed int> : public true_type{ };
-
- template <> struct HasTrivialDestructor<signed long> : public true_type{ };
- template <> struct HasTrivialDestructor<const signed long> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile signed long> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile signed long> : public true_type{ };
-
- template <> struct HasTrivialDestructor<signed long long> : public true_type{ };
- template <> struct HasTrivialDestructor<const signed long long> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile signed long long> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile signed long long> : public true_type{ };
-
- template <> struct HasTrivialDestructor<bool> : public true_type{ };
- template <> struct HasTrivialDestructor<const bool> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile bool> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile bool> : public true_type{ };
-
- template <> struct HasTrivialDestructor<char> : public true_type{ };
- template <> struct HasTrivialDestructor<const char> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile char> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile char> : public true_type{ };
-
- #if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
- template <> struct HasTrivialDestructor<wchar_t> : public true_type{ };
- template <> struct HasTrivialDestructor<const wchar_t> : public true_type{ };
- template <> struct HasTrivialDestructor<volatile wchar_t> : public true_type{ };
- template <> struct HasTrivialDestructor<const volatile wchar_t> : public true_type{ };
- #endif
-
-#endif // __GLIBCXX__, etc.
-
} // namespace WTF
#endif // TypeTraits_h
diff --git a/masm/wtf/Vector.h b/masm/wtf/Vector.h
index 8a5eaf6213..6cd43115b1 100644
--- a/masm/wtf/Vector.h
+++ b/masm/wtf/Vector.h
@@ -32,10 +32,6 @@
#include <limits>
#include <utility>
-#if PLATFORM(QT)
-#include <QDataStream>
-#endif
-
namespace WTF {
using std::min;
@@ -619,6 +615,7 @@ namespace WTF {
template<typename U> void append(const U&);
template<typename U> void uncheckedAppend(const U& val);
template<size_t otherCapacity> void append(const Vector<T, otherCapacity>&);
+ template<typename U, size_t otherCapacity> void appendVector(const Vector<U, otherCapacity>&);
template<typename U> bool tryAppend(const U*, size_t);
template<typename U> void insert(size_t position, const U*, size_t);
@@ -695,32 +692,6 @@ namespace WTF {
Buffer m_buffer;
};
-#if PLATFORM(QT)
- template<typename T>
- QDataStream& operator<<(QDataStream& stream, const Vector<T>& data)
- {
- stream << qint64(data.size());
- foreach (const T& i, data)
- stream << i;
- return stream;
- }
-
- template<typename T>
- QDataStream& operator>>(QDataStream& stream, Vector<T>& data)
- {
- data.clear();
- qint64 count;
- T item;
- stream >> count;
- data.reserveCapacity(count);
- for (qint64 i = 0; i < count; ++i) {
- stream >> item;
- data.append(item);
- }
- return stream;
- }
-#endif
-
template<typename T, size_t inlineCapacity>
Vector<T, inlineCapacity>::Vector(const Vector& other)
: m_size(other.size())
@@ -1103,6 +1074,12 @@ namespace WTF {
append(val.begin(), val.size());
}
+ template<typename T, size_t inlineCapacity> template<typename U, size_t otherCapacity>
+ inline void Vector<T, inlineCapacity>::appendVector(const Vector<U, otherCapacity>& val)
+ {
+ append(val.begin(), val.size());
+ }
+
template<typename T, size_t inlineCapacity> template<typename U>
void Vector<T, inlineCapacity>::insert(size_t position, const U* data, size_t dataSize)
{