Diffstat (limited to 'src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c')
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c | 914
1 file changed, 597 insertions(+), 317 deletions(-)
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c
index ddcc5ebf76..651942be80 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeX86_common.c
@@ -26,11 +26,7 @@
SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
{
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
- return "x86" SLJIT_CPUINFO " ABI:fastcall";
-#else
return "x86" SLJIT_CPUINFO;
-#endif
}
/*
@@ -65,6 +61,8 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
15 - R15
*/
+#define TMP_FREG (0)
+
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
/* Last register + 1. */
@@ -76,10 +74,7 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 3] = {
#define CHECK_EXTRA_REGS(p, w, do) \
if (p >= SLJIT_R3 && p <= SLJIT_S3) { \
- if (p <= compiler->scratches) \
- w = compiler->saveds_offset - ((p) - SLJIT_R2) * (sljit_sw)sizeof(sljit_sw); \
- else \
- w = compiler->locals_offset + ((p) - SLJIT_S2) * (sljit_sw)sizeof(sljit_sw); \
+ w = (2 * SSIZE_OF(sw)) + ((p) - SLJIT_R3) * SSIZE_OF(sw); \
p = SLJIT_MEM1(SLJIT_SP); \
do; \
}
@@ -115,11 +110,11 @@ static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
/* Args: xmm0-xmm3 */
static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
- 4, 0, 1, 2, 3, 5, 6
+ 4, 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};
/* low-map. freg_map & 0x7. */
static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
- 4, 0, 1, 2, 3, 5, 6
+ 4, 0, 1, 2, 3, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7
};
#define REX_W 0x48
@@ -143,7 +138,8 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
#endif /* SLJIT_CONFIG_X86_32 */
-#define TMP_FREG (0)
+#define U8(v) ((sljit_u8)(v))
+
/* Size flags for emit_x86_instruction: */
#define EX86_BIN_INS 0x0010
@@ -178,6 +174,7 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
#define AND_rm_r 0x21
#define ANDPD_x_xm 0x54
#define BSR_r_rm (/* GROUP_0F */ 0xbd)
+#define BSF_r_rm (/* GROUP_0F */ 0xbc)
#define CALL_i32 0xe8
#define CALL_rm (/* GROUP_FF */ 2 << 3)
#define CDQ 0x99
@@ -191,6 +188,8 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
#define CVTTSD2SI_r_xm 0x2c
#define DIV (/* GROUP_F7 */ 6 << 3)
#define DIVSD_x_xm 0x5e
+#define FLDS 0xd9
+#define FLDL 0xdd
#define FSTPS 0xd9
#define FSTPD 0xdd
#define INT3 0xcc
@@ -205,12 +204,16 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
#define JMP_i32 0xe9
#define JMP_rm (/* GROUP_FF */ 4 << 3)
#define LEA_r_m 0x8d
+#define LOOP_i8 0xe2
+#define LZCNT_r_rm (/* GROUP_F3 */ /* GROUP_0F */ 0xbd)
#define MOV_r_rm 0x8b
#define MOV_r_i32 0xb8
#define MOV_rm_r 0x89
#define MOV_rm_i32 0xc7
#define MOV_rm8_i8 0xc6
#define MOV_rm8_r8 0x88
+#define MOVAPS_x_xm 0x28
+#define MOVAPS_xm_x 0x29
#define MOVSD_x_xm 0x10
#define MOVSD_xm_x 0x11
#define MOVSXD_r_rm 0x63
@@ -236,6 +239,8 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
#define PUSH_r 0x50
#define PUSH_rm (/* GROUP_FF */ 6 << 3)
#define PUSHF 0x9c
+#define ROL (/* SHIFT */ 0 << 3)
+#define ROR (/* SHIFT */ 1 << 3)
#define RET_near 0xc3
#define RET_i16 0xc2
#define SBB (/* BINARY */ 3 << 3)
@@ -244,6 +249,8 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
#define SBB_rm_r 0x19
#define SAR (/* SHIFT */ 7 << 3)
#define SHL (/* SHIFT */ 4 << 3)
+#define SHLD (/* GROUP_0F */ 0xa5)
+#define SHRD (/* GROUP_0F */ 0xad)
#define SHR (/* SHIFT */ 5 << 3)
#define SUB (/* BINARY */ 5 << 3)
#define SUB_EAX_i32 0x2d
@@ -252,6 +259,7 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
#define SUBSD_x_xm 0x5c
#define TEST_EAX_i32 0xa9
#define TEST_rm_r 0x85
+#define TZCNT_r_rm (/* GROUP_F3 */ /* GROUP_0F */ 0xbc)
#define UCOMISD_x_xm 0x2e
#define UNPCKLPD_x_xm 0x14
#define XCHG_EAX_r 0x90
@@ -263,6 +271,7 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
#define XORPD_x_xm 0x57
#define GROUP_0F 0x0f
+#define GROUP_F3 0xf3
#define GROUP_F7 0xf7
#define GROUP_FF 0xff
#define GROUP_BINARY_81 0x81
@@ -274,22 +283,25 @@ static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
#define MOD_REG 0xc0
#define MOD_DISP8 0x40
-#define INC_SIZE(s) (*inst++ = (s), compiler->size += (s))
+#define INC_SIZE(s) (*inst++ = U8(s), compiler->size += (s))
-#define PUSH_REG(r) (*inst++ = (PUSH_r + (r)))
-#define POP_REG(r) (*inst++ = (POP_r + (r)))
-#define RET() (*inst++ = (RET_near))
-#define RET_I16(n) (*inst++ = (RET_i16), *inst++ = n, *inst++ = 0)
-/* r32, r/m32 */
-#define MOV_RM(mod, reg, rm) (*inst++ = (MOV_r_rm), *inst++ = (mod) << 6 | (reg) << 3 | (rm))
+#define PUSH_REG(r) (*inst++ = U8(PUSH_r + (r)))
+#define POP_REG(r) (*inst++ = U8(POP_r + (r)))
+#define RET() (*inst++ = RET_near)
+#define RET_I16(n) (*inst++ = RET_i16, *inst++ = U8(n), *inst++ = 0)
/* Multithreading does not affect these static variables, since they store
built-in CPU features. Therefore they can be overwritten by different threads
if they detect the CPU features in the same time. */
+#define CPU_FEATURE_DETECTED 0x001
#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
-static sljit_s32 cpu_has_sse2 = -1;
+#define CPU_FEATURE_SSE2 0x002
#endif
-static sljit_s32 cpu_has_cmov = -1;
+#define CPU_FEATURE_LZCNT 0x004
+#define CPU_FEATURE_TZCNT 0x008
+#define CPU_FEATURE_CMOV 0x010
+
+static sljit_u32 cpu_feature_list = 0;
#ifdef _WIN32_WCE
#include <cmnintrin.h>
@@ -322,18 +334,65 @@ static SLJIT_INLINE void sljit_unaligned_store_sw(void *addr, sljit_sw value)
static void get_cpu_features(void)
{
- sljit_u32 features;
+ sljit_u32 feature_list = CPU_FEATURE_DETECTED;
+ sljit_u32 value;
#if defined(_MSC_VER) && _MSC_VER >= 1400
int CPUInfo[4];
+
+ __cpuid(CPUInfo, 0);
+ if (CPUInfo[0] >= 7) {
+ __cpuidex(CPUInfo, 7, 0);
+ if (CPUInfo[1] & 0x8)
+ feature_list |= CPU_FEATURE_TZCNT;
+ }
+
+ __cpuid(CPUInfo, (int)0x80000001);
+ if (CPUInfo[2] & 0x20)
+ feature_list |= CPU_FEATURE_LZCNT;
+
__cpuid(CPUInfo, 1);
- features = (sljit_u32)CPUInfo[3];
+ value = (sljit_u32)CPUInfo[3];
#elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C)
/* AT&T syntax. */
__asm__ (
+ "movl $0x0, %%eax\n"
+ "lzcnt %%eax, %%eax\n"
+ "setnz %%al\n"
+ "movl %%eax, %0\n"
+ : "=g" (value)
+ :
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ : "eax"
+#else
+ : "rax"
+#endif
+ );
+
+ if (value & 0x1)
+ feature_list |= CPU_FEATURE_LZCNT;
+
+ __asm__ (
+ "movl $0x0, %%eax\n"
+ "tzcnt %%eax, %%eax\n"
+ "setnz %%al\n"
+ "movl %%eax, %0\n"
+ : "=g" (value)
+ :
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ : "eax"
+#else
+ : "rax"
+#endif
+ );
+
+ if (value & 0x1)
+ feature_list |= CPU_FEATURE_TZCNT;
+
+ __asm__ (
"movl $0x1, %%eax\n"
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
/* On x86-32, there is no red zone, so this
@@ -345,7 +404,7 @@ static void get_cpu_features(void)
"pop %%ebx\n"
#endif
"movl %%edx, %0\n"
- : "=g" (features)
+ : "=g" (value)
:
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
: "%eax", "%ecx", "%edx"
@@ -358,44 +417,82 @@ static void get_cpu_features(void)
/* Intel syntax. */
__asm {
+ mov eax, 0
+ lzcnt eax, eax
+ setnz al
+ mov value, eax
+ }
+
+ if (value & 0x1)
+ feature_list |= CPU_FEATURE_LZCNT;
+
+ __asm {
+ mov eax, 0
+ tzcnt eax, eax
+ setnz al
+ mov value, eax
+ }
+
+ if (value & 0x1)
+ feature_list |= CPU_FEATURE_TZCNT;
+
+ __asm {
mov eax, 1
cpuid
- mov features, edx
+ mov value, edx
}
#endif /* _MSC_VER && _MSC_VER >= 1400 */
#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
- cpu_has_sse2 = (features >> 26) & 0x1;
+ if (value & 0x4000000)
+ feature_list |= CPU_FEATURE_SSE2;
#endif
- cpu_has_cmov = (features >> 15) & 0x1;
+ if (value & 0x8000)
+ feature_list |= CPU_FEATURE_CMOV;
+
+ cpu_feature_list = feature_list;
}
-static sljit_u8 get_jump_code(sljit_s32 type)
+static sljit_u8 get_jump_code(sljit_uw type)
{
switch (type) {
case SLJIT_EQUAL:
- case SLJIT_EQUAL_F64:
+ case SLJIT_F_EQUAL:
+ case SLJIT_UNORDERED_OR_EQUAL:
+ case SLJIT_ORDERED_EQUAL: /* Not supported. */
return 0x84 /* je */;
case SLJIT_NOT_EQUAL:
- case SLJIT_NOT_EQUAL_F64:
+ case SLJIT_F_NOT_EQUAL:
+ case SLJIT_ORDERED_NOT_EQUAL:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL: /* Not supported. */
return 0x85 /* jne */;
case SLJIT_LESS:
- case SLJIT_LESS_F64:
+ case SLJIT_CARRY:
+ case SLJIT_F_LESS:
+ case SLJIT_UNORDERED_OR_LESS:
+ case SLJIT_UNORDERED_OR_GREATER:
return 0x82 /* jc */;
case SLJIT_GREATER_EQUAL:
- case SLJIT_GREATER_EQUAL_F64:
+ case SLJIT_NOT_CARRY:
+ case SLJIT_F_GREATER_EQUAL:
+ case SLJIT_ORDERED_GREATER_EQUAL:
+ case SLJIT_ORDERED_LESS_EQUAL:
return 0x83 /* jae */;
case SLJIT_GREATER:
- case SLJIT_GREATER_F64:
+ case SLJIT_F_GREATER:
+ case SLJIT_ORDERED_LESS:
+ case SLJIT_ORDERED_GREATER:
return 0x87 /* jnbe */;
case SLJIT_LESS_EQUAL:
- case SLJIT_LESS_EQUAL_F64:
+ case SLJIT_F_LESS_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+ case SLJIT_UNORDERED_OR_LESS_EQUAL:
return 0x86 /* jbe */;
case SLJIT_SIG_LESS:
@@ -411,17 +508,15 @@ static sljit_u8 get_jump_code(sljit_s32 type)
return 0x8e /* jle */;
case SLJIT_OVERFLOW:
- case SLJIT_MUL_OVERFLOW:
return 0x80 /* jo */;
case SLJIT_NOT_OVERFLOW:
- case SLJIT_MUL_NOT_OVERFLOW:
return 0x81 /* jno */;
- case SLJIT_UNORDERED_F64:
+ case SLJIT_UNORDERED:
return 0x8a /* jp */;
- case SLJIT_ORDERED_F64:
+ case SLJIT_ORDERED:
return 0x8b /* jpo */;
}
return 0;
@@ -436,22 +531,22 @@ static sljit_u8* generate_put_label_code(struct sljit_put_label *put_label, slji
static sljit_u8* generate_near_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_u8 *code, sljit_sw executable_offset)
{
- sljit_s32 type = jump->flags >> TYPE_SHIFT;
+ sljit_uw type = jump->flags >> TYPE_SHIFT;
sljit_s32 short_jump;
sljit_uw label_addr;
if (jump->flags & JUMP_LABEL)
label_addr = (sljit_uw)(code + jump->u.label->size);
else
- label_addr = jump->u.target - executable_offset;
-
- short_jump = (sljit_sw)(label_addr - (jump->addr + 2)) >= -128 && (sljit_sw)(label_addr - (jump->addr + 2)) <= 127;
+ label_addr = jump->u.target - (sljit_uw)executable_offset;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if ((sljit_sw)(label_addr - (jump->addr + 1)) > HALFWORD_MAX || (sljit_sw)(label_addr - (jump->addr + 1)) < HALFWORD_MIN)
return generate_far_jump_code(jump, code_ptr);
#endif
+ short_jump = (sljit_sw)(label_addr - (jump->addr + 2)) >= -128 && (sljit_sw)(label_addr - (jump->addr + 2)) <= 127;
+
if (type == SLJIT_JUMP) {
if (short_jump)
*code_ptr++ = JMP_i8;
@@ -465,7 +560,7 @@ static sljit_u8* generate_near_jump_code(struct sljit_jump *jump, sljit_u8 *code
jump->addr++;
}
else if (short_jump) {
- *code_ptr++ = get_jump_code(type) - 0x10;
+ *code_ptr++ = U8(get_jump_code(type) - 0x10);
jump->addr++;
}
else {
@@ -494,7 +589,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
sljit_u8 *buf_end;
sljit_u8 len;
sljit_sw executable_offset;
- sljit_sw jump_addr;
+ sljit_uw jump_addr;
struct sljit_label *label;
struct sljit_jump *jump;
@@ -532,7 +627,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
switch (*buf_ptr) {
case 0:
label->addr = (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset);
- label->size = code_ptr - code;
+ label->size = (sljit_uw)(code_ptr - code);
label = label->next;
break;
case 1:
@@ -577,32 +672,33 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
jump = compiler->jumps;
while (jump) {
- jump_addr = jump->addr + executable_offset;
+ if (jump->flags & (PATCH_MB | PATCH_MW)) {
+ if (jump->flags & JUMP_LABEL)
+ jump_addr = jump->u.label->addr;
+ else
+ jump_addr = jump->u.target;
- if (jump->flags & PATCH_MB) {
- SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))) >= -128 && (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8))) <= 127);
- *(sljit_u8*)jump->addr = (sljit_u8)(jump->u.label->addr - (jump_addr + sizeof(sljit_s8)));
- } else if (jump->flags & PATCH_MW) {
- if (jump->flags & JUMP_LABEL) {
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_sw))));
-#else
- SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))) >= HALFWORD_MIN && (sljit_sw)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))) <= HALFWORD_MAX);
- sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)(jump->u.label->addr - (jump_addr + sizeof(sljit_s32))));
-#endif
- }
- else {
+ jump_addr -= jump->addr + (sljit_uw)executable_offset;
+
+ if (jump->flags & PATCH_MB) {
+ jump_addr -= sizeof(sljit_s8);
+ SLJIT_ASSERT((sljit_sw)jump_addr >= -128 && (sljit_sw)jump_addr <= 127);
+ *(sljit_u8*)jump->addr = U8(jump_addr);
+ } else {
+ jump_addr -= sizeof(sljit_s32);
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_sw))));
+ sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)jump_addr);
#else
- SLJIT_ASSERT((sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_s32))) >= HALFWORD_MIN && (sljit_sw)(jump->u.target - (jump_addr + sizeof(sljit_s32))) <= HALFWORD_MAX);
- sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)(jump->u.target - (jump_addr + sizeof(sljit_s32))));
+ SLJIT_ASSERT((sljit_sw)jump_addr >= HALFWORD_MIN && (sljit_sw)jump_addr <= HALFWORD_MAX);
+ sljit_unaligned_store_s32((void*)jump->addr, (sljit_s32)jump_addr);
#endif
}
}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- else if (jump->flags & PATCH_MD)
- sljit_unaligned_store_sw((void*)jump->addr, jump->u.label->addr);
+ else if (jump->flags & PATCH_MD) {
+ SLJIT_ASSERT(jump->flags & JUMP_LABEL);
+ sljit_unaligned_store_sw((void*)jump->addr, (sljit_sw)jump->u.label->addr);
+ }
#endif
jump = jump->next;
@@ -628,7 +724,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
compiler->error = SLJIT_ERR_COMPILED;
compiler->executable_offset = executable_offset;
- compiler->executable_size = code_ptr - code;
+ compiler->executable_size = (sljit_uw)(code_ptr - code);
code = (sljit_u8*)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
@@ -643,9 +739,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
#ifdef SLJIT_IS_FPU_AVAILABLE
return SLJIT_IS_FPU_AVAILABLE;
#elif (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
- if (cpu_has_sse2 == -1)
+ if (cpu_feature_list == 0)
get_cpu_features();
- return cpu_has_sse2;
+ return (cpu_feature_list & CPU_FEATURE_SSE2) != 0;
#else /* SLJIT_DETECT_SSE2 */
return 1;
#endif /* SLJIT_DETECT_SSE2 */
@@ -653,48 +749,97 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
case SLJIT_HAS_VIRTUAL_REGISTERS:
return 1;
-#endif
+#endif /* SLJIT_CONFIG_X86_32 */
case SLJIT_HAS_CLZ:
+ if (cpu_feature_list == 0)
+ get_cpu_features();
+
+ return (cpu_feature_list & CPU_FEATURE_LZCNT) ? 1 : 2;
+
+ case SLJIT_HAS_CTZ:
+ if (cpu_feature_list == 0)
+ get_cpu_features();
+
+ return (cpu_feature_list & CPU_FEATURE_TZCNT) ? 1 : 2;
+
case SLJIT_HAS_CMOV:
- if (cpu_has_cmov == -1)
+ if (cpu_feature_list == 0)
get_cpu_features();
- return cpu_has_cmov;
+ return (cpu_feature_list & CPU_FEATURE_CMOV) != 0;
+ case SLJIT_HAS_ROT:
case SLJIT_HAS_PREFETCH:
return 1;
case SLJIT_HAS_SSE2:
#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
- if (cpu_has_sse2 == -1)
+ if (cpu_feature_list == 0)
get_cpu_features();
- return cpu_has_sse2;
-#else
+ return (cpu_feature_list & CPU_FEATURE_SSE2) != 0;
+#else /* !SLJIT_DETECT_SSE2 */
return 1;
-#endif
+#endif /* SLJIT_DETECT_SSE2 */
default:
return 0;
}
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_cmp_info(sljit_s32 type)
+{
+ if (type < SLJIT_UNORDERED || type > SLJIT_ORDERED_LESS_EQUAL)
+ return 0;
+
+ switch (type) {
+ case SLJIT_ORDERED_EQUAL:
+ case SLJIT_UNORDERED_OR_NOT_EQUAL:
+ return 0;
+ }
+
+ return 1;
+}
+
/* --------------------------------------------------------------------- */
/* Operators */
/* --------------------------------------------------------------------- */
#define BINARY_OPCODE(opcode) (((opcode ## _EAX_i32) << 24) | ((opcode ## _r_rm) << 16) | ((opcode ## _rm_r) << 8) | (opcode))
-static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
- sljit_u32 op_types,
- sljit_s32 dst, sljit_sw dstw,
- sljit_s32 src1, sljit_sw src1w,
- sljit_s32 src2, sljit_sw src2w);
+#define BINARY_IMM32(op_imm, immw, arg, argw) \
+ do { \
+ inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, immw, arg, argw); \
+ FAIL_IF(!inst); \
+ *(inst + 1) |= (op_imm); \
+ } while (0)
-static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
- sljit_u32 op_types,
- sljit_s32 dst, sljit_sw dstw,
- sljit_s32 src1, sljit_sw src1w,
- sljit_s32 src2, sljit_sw src2w);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+
+#define BINARY_IMM(op_imm, op_mr, immw, arg, argw) \
+ do { \
+ if (IS_HALFWORD(immw) || compiler->mode32) { \
+ BINARY_IMM32(op_imm, immw, arg, argw); \
+ } \
+ else { \
+ FAIL_IF(emit_load_imm64(compiler, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, immw)); \
+ inst = emit_x86_instruction(compiler, 1, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, 0, arg, argw); \
+ FAIL_IF(!inst); \
+ *inst = (op_mr); \
+ } \
+ } while (0)
+
+#define BINARY_EAX_IMM(op_eax_imm, immw) \
+ FAIL_IF(emit_do_imm32(compiler, (!compiler->mode32) ? REX_W : 0, (op_eax_imm), immw))
+
+#else /* !SLJIT_CONFIG_X86_64 */
+
+#define BINARY_IMM(op_imm, op_mr, immw, arg, argw) \
+ BINARY_IMM32(op_imm, immw, arg, argw)
+
+#define BINARY_EAX_IMM(op_eax_imm, immw) \
+ FAIL_IF(emit_do_imm(compiler, (op_eax_imm), immw))
+
+#endif /* SLJIT_CONFIG_X86_64 */
static sljit_s32 emit_mov(struct sljit_compiler *compiler,
sljit_s32 dst, sljit_sw dstw,
@@ -797,7 +942,7 @@ static SLJIT_INLINE sljit_s32 cpu_has_shadow_stack(void)
}
static SLJIT_INLINE sljit_s32 adjust_shadow_stack(struct sljit_compiler *compiler,
- sljit_s32 src, sljit_sw srcw, sljit_s32 base, sljit_sw disp)
+ sljit_s32 src, sljit_sw srcw)
{
#if (defined SLJIT_CONFIG_X86_CET && SLJIT_CONFIG_X86_CET) && defined (__SHSTK__)
sljit_u8 *inst, *jz_after_cmp_inst;
@@ -823,12 +968,6 @@ static SLJIT_INLINE sljit_s32 adjust_shadow_stack(struct sljit_compiler *compile
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(TMP_REG1), 0);
#endif /* SLJIT_CONFIG_X86_32 */
- if (src == SLJIT_UNUSED) {
- /* Return address is on stack. */
- src = SLJIT_MEM1(base);
- srcw = disp;
- }
-
/* Compare return address against TMP_REG1. */
FAIL_IF(emit_cmp_binary (compiler, TMP_REG1, 0, src, srcw));
@@ -863,8 +1002,6 @@ static SLJIT_INLINE sljit_s32 adjust_shadow_stack(struct sljit_compiler *compile
SLJIT_UNUSED_ARG(compiler);
SLJIT_UNUSED_ARG(src);
SLJIT_UNUSED_ARG(srcw);
- SLJIT_UNUSED_ARG(base);
- SLJIT_UNUSED_ARG(disp);
#endif /* SLJIT_CONFIG_X86_CET && __SHSTK__ */
return SLJIT_SUCCESS;
}
@@ -881,8 +1018,6 @@ static sljit_s32 emit_mov(struct sljit_compiler *compiler,
{
sljit_u8* inst;
- SLJIT_ASSERT(dst != SLJIT_UNUSED);
-
if (FAST_IS_REG(src)) {
inst = emit_x86_instruction(compiler, 1, src, 0, dst, dstw);
FAIL_IF(!inst);
@@ -892,14 +1027,14 @@ static sljit_s32 emit_mov(struct sljit_compiler *compiler,
if (src & SLJIT_IMM) {
if (FAST_IS_REG(dst)) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw);
+ return emit_do_imm(compiler, MOV_r_i32 | reg_map[dst], srcw);
#else
if (!compiler->mode32) {
if (NOT_HALFWORD(srcw))
return emit_load_imm64(compiler, dst, srcw);
}
else
- return emit_do_imm32(compiler, (reg_map[dst] >= 8) ? REX_B : 0, MOV_r_i32 + reg_lmap[dst], srcw);
+ return emit_do_imm32(compiler, (reg_map[dst] >= 8) ? REX_B : 0, U8(MOV_r_i32 | reg_lmap[dst]), srcw);
#endif
}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
@@ -940,7 +1075,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
{
sljit_u8 *inst;
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- sljit_s32 size;
+ sljit_uw size;
#endif
CHECK_ERROR();
@@ -977,7 +1112,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
&& reg_map[SLJIT_R1] < 7
&& reg_map[TMP_REG1] == 2);
#endif
- compiler->mode32 = op & SLJIT_I32_OP;
+ compiler->mode32 = op & SLJIT_32;
#endif
SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
@@ -1086,7 +1221,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
inst = (sljit_u8*)ensure_buf(compiler, 1 + 1); \
FAIL_IF(!inst); \
INC_SIZE(1); \
- *inst = (prefix); \
+ *inst = U8(prefix); \
} while (0)
static sljit_s32 emit_mov_byte(struct sljit_compiler *compiler, sljit_s32 sign,
@@ -1106,7 +1241,7 @@ static sljit_s32 emit_mov_byte(struct sljit_compiler *compiler, sljit_s32 sign,
if (src & SLJIT_IMM) {
if (FAST_IS_REG(dst)) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw);
+ return emit_do_imm(compiler, MOV_r_i32 | reg_map[dst], srcw);
#else
inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, srcw, dst, 0);
FAIL_IF(!inst);
@@ -1136,7 +1271,7 @@ static sljit_s32 emit_mov_byte(struct sljit_compiler *compiler, sljit_s32 sign,
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
else if (FAST_IS_REG(src) && reg_map[src] >= 4) {
/* src, dst are registers. */
- SLJIT_ASSERT(SLOW_IS_REG(dst));
+ SLJIT_ASSERT(FAST_IS_REG(dst));
if (reg_map[dst] < 4) {
if (dst != src)
EMIT_MOV(compiler, dst, 0, src, 0);
@@ -1195,7 +1330,7 @@ static sljit_s32 emit_mov_byte(struct sljit_compiler *compiler, sljit_s32 sign,
}
if (work_r == SLJIT_R0) {
- ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REG1]);
+ ENCODE_PREFIX(XCHG_EAX_r | reg_map[TMP_REG1]);
}
else {
inst = emit_x86_instruction(compiler, 1, work_r, 0, dst_r, 0);
@@ -1208,7 +1343,7 @@ static sljit_s32 emit_mov_byte(struct sljit_compiler *compiler, sljit_s32 sign,
*inst = MOV_rm8_r8;
if (work_r == SLJIT_R0) {
- ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REG1]);
+ ENCODE_PREFIX(XCHG_EAX_r | reg_map[TMP_REG1]);
}
else {
inst = emit_x86_instruction(compiler, 1, work_r, 0, dst_r, 0);
@@ -1269,7 +1404,7 @@ static sljit_s32 emit_mov_half(struct sljit_compiler *compiler, sljit_s32 sign,
if (src & SLJIT_IMM) {
if (FAST_IS_REG(dst)) {
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw);
+ return emit_do_imm(compiler, MOV_r_i32 | reg_map[dst], srcw);
#else
inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, srcw, dst, 0);
FAIL_IF(!inst);
@@ -1318,9 +1453,6 @@ static sljit_s32 emit_unary(struct sljit_compiler *compiler, sljit_u8 opcode,
return SLJIT_SUCCESS;
}
- if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED))
- dst = TMP_REG1;
-
if (FAST_IS_REG(dst)) {
EMIT_MOV(compiler, dst, 0, src, srcw);
inst = emit_x86_instruction(compiler, 1, 0, 0, dst, 0);
@@ -1345,9 +1477,6 @@ static sljit_s32 emit_not_with_flags(struct sljit_compiler *compiler,
{
sljit_u8* inst;
- if (dst == SLJIT_UNUSED)
- dst = TMP_REG1;
-
if (FAST_IS_REG(dst)) {
EMIT_MOV(compiler, dst, 0, src, srcw);
inst = emit_x86_instruction(compiler, 1, 0, 0, dst, 0);
@@ -1374,47 +1503,75 @@ static sljit_s32 emit_not_with_flags(struct sljit_compiler *compiler,
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
static const sljit_sw emit_clz_arg = 32 + 31;
+static const sljit_sw emit_ctz_arg = 32;
#endif
-static sljit_s32 emit_clz(struct sljit_compiler *compiler, sljit_s32 op_flags,
+static sljit_s32 emit_clz_ctz(struct sljit_compiler *compiler, sljit_s32 is_clz,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
sljit_u8* inst;
sljit_s32 dst_r;
+ sljit_sw max;
- SLJIT_UNUSED_ARG(op_flags);
-
- if (cpu_has_cmov == -1)
+ if (cpu_feature_list == 0)
get_cpu_features();
dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
+ if (is_clz ? (cpu_feature_list & CPU_FEATURE_LZCNT) : (cpu_feature_list & CPU_FEATURE_TZCNT)) {
+ /* Group prefix added separately. */
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
+ FAIL_IF(!inst);
+ INC_SIZE(1);
+ *inst++ = GROUP_F3;
+
+ inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
+ FAIL_IF(!inst);
+ *inst++ = GROUP_0F;
+ *inst = is_clz ? LZCNT_r_rm : TZCNT_r_rm;
+
+ if (dst & SLJIT_MEM)
+ EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
+ return SLJIT_SUCCESS;
+ }
+
inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
FAIL_IF(!inst);
*inst++ = GROUP_0F;
- *inst = BSR_r_rm;
+ *inst = is_clz ? BSR_r_rm : BSF_r_rm;
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- if (cpu_has_cmov) {
+ max = is_clz ? (32 + 31) : 32;
+
+ if (cpu_feature_list & CPU_FEATURE_CMOV) {
if (dst_r != TMP_REG1) {
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 32 + 31);
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, max);
inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG1, 0);
}
else
- inst = emit_x86_instruction(compiler, 2, dst_r, 0, SLJIT_MEM0(), (sljit_sw)&emit_clz_arg);
+ inst = emit_x86_instruction(compiler, 2, dst_r, 0, SLJIT_MEM0(), is_clz ? (sljit_sw)&emit_clz_arg : (sljit_sw)&emit_ctz_arg);
FAIL_IF(!inst);
*inst++ = GROUP_0F;
*inst = CMOVE_r_rm;
}
else
- FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, 32 + 31));
+ FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, max));
- inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0);
+ if (is_clz) {
+ inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0);
+ FAIL_IF(!inst);
+ *(inst + 1) |= XOR;
+ }
#else
- if (cpu_has_cmov) {
- EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? (64 + 63) : (32 + 31));
+ if (is_clz)
+ max = compiler->mode32 ? (32 + 31) : (64 + 63);
+ else
+ max = compiler->mode32 ? 32 : 64;
+
+ if (cpu_feature_list & CPU_FEATURE_CMOV) {
+ EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, max);
inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0);
FAIL_IF(!inst);
@@ -1422,14 +1579,15 @@ static sljit_s32 emit_clz(struct sljit_compiler *compiler, sljit_s32 op_flags,
*inst = CMOVE_r_rm;
}
else
- FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? (64 + 63) : (32 + 31)));
+ FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, max));
- inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? 63 : 31, dst_r, 0);
+ if (is_clz) {
+ inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, max >> 1, dst_r, 0);
+ FAIL_IF(!inst);
+ *(inst + 1) |= XOR;
+ }
#endif
- FAIL_IF(!inst);
- *(inst + 1) |= XOR;
-
if (dst & SLJIT_MEM)
EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
return SLJIT_SUCCESS;
@@ -1452,7 +1610,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
CHECK_EXTRA_REGS(dst, dstw, dst_is_ereg = 1);
CHECK_EXTRA_REGS(src, srcw, (void)0);
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- compiler->mode32 = op_flags & SLJIT_I32_OP;
+ compiler->mode32 = op_flags & SLJIT_32;
#endif
op = GET_OPCODE(op);
@@ -1467,8 +1625,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
return SLJIT_SUCCESS;
}
- if (op_flags & SLJIT_I32_OP) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ if (op_flags & SLJIT_32) {
if (src & SLJIT_MEM) {
if (op == SLJIT_MOV_S32)
op = SLJIT_MOV_U32;
@@ -1477,8 +1635,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
if (op == SLJIT_MOV_U32)
op = SLJIT_MOV_S32;
}
-#endif
}
+#endif
if (src & SLJIT_IMM) {
switch (op) {
@@ -1522,8 +1680,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
case SLJIT_MOV_U32:
case SLJIT_MOV_S32:
+ case SLJIT_MOV32:
#endif
- FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw));
+ EMIT_MOV(compiler, dst, dstw, src, srcw);
break;
case SLJIT_MOV_U8:
FAIL_IF(emit_mov_byte(compiler, 0, dst, dstw, src, srcw));
@@ -1544,6 +1703,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
case SLJIT_MOV_S32:
FAIL_IF(emit_mov_int(compiler, 1, dst, dstw, src, srcw));
break;
+ case SLJIT_MOV32:
+ compiler->mode32 = 1;
+ EMIT_MOV(compiler, dst, dstw, src, srcw);
+ compiler->mode32 = 0;
+ break;
#endif
}
@@ -1560,46 +1724,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
return emit_not_with_flags(compiler, dst, dstw, src, srcw);
return emit_unary(compiler, NOT_rm, dst, dstw, src, srcw);
- case SLJIT_NEG:
- return emit_unary(compiler, NEG_rm, dst, dstw, src, srcw);
-
case SLJIT_CLZ:
- return emit_clz(compiler, op_flags, dst, dstw, src, srcw);
+ case SLJIT_CTZ:
+ return emit_clz_ctz(compiler, (op == SLJIT_CLZ), dst, dstw, src, srcw);
}
return SLJIT_SUCCESS;
}
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-
-#define BINARY_IMM(op_imm, op_mr, immw, arg, argw) \
- if (IS_HALFWORD(immw) || compiler->mode32) { \
- inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, immw, arg, argw); \
- FAIL_IF(!inst); \
- *(inst + 1) |= (op_imm); \
- } \
- else { \
- FAIL_IF(emit_load_imm64(compiler, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, immw)); \
- inst = emit_x86_instruction(compiler, 1, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, 0, arg, argw); \
- FAIL_IF(!inst); \
- *inst = (op_mr); \
- }
-
-#define BINARY_EAX_IMM(op_eax_imm, immw) \
- FAIL_IF(emit_do_imm32(compiler, (!compiler->mode32) ? REX_W : 0, (op_eax_imm), immw))
-
-#else
-
-#define BINARY_IMM(op_imm, op_mr, immw, arg, argw) \
- inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, immw, arg, argw); \
- FAIL_IF(!inst); \
- *(inst + 1) |= (op_imm);
-
-#define BINARY_EAX_IMM(op_eax_imm, immw) \
- FAIL_IF(emit_do_imm(compiler, (op_eax_imm), immw))
-
-#endif
-
static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
sljit_u32 op_types,
sljit_s32 dst, sljit_sw dstw,
@@ -1607,23 +1739,10 @@ static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
sljit_s32 src2, sljit_sw src2w)
{
sljit_u8* inst;
- sljit_u8 op_eax_imm = (op_types >> 24);
- sljit_u8 op_rm = (op_types >> 16) & 0xff;
- sljit_u8 op_mr = (op_types >> 8) & 0xff;
- sljit_u8 op_imm = op_types & 0xff;
-
- if (dst == SLJIT_UNUSED) {
- EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
- if (src2 & SLJIT_IMM) {
- BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0);
- }
- else {
- inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w);
- FAIL_IF(!inst);
- *inst = op_rm;
- }
- return SLJIT_SUCCESS;
- }
+ sljit_u8 op_eax_imm = U8(op_types >> 24);
+ sljit_u8 op_rm = U8((op_types >> 16) & 0xff);
+ sljit_u8 op_mr = U8((op_types >> 8) & 0xff);
+ sljit_u8 op_imm = U8(op_types & 0xff);
if (dst == src1 && dstw == src1w) {
if (src2 & SLJIT_IMM) {
@@ -1727,23 +1846,10 @@ static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
sljit_s32 src2, sljit_sw src2w)
{
sljit_u8* inst;
- sljit_u8 op_eax_imm = (op_types >> 24);
- sljit_u8 op_rm = (op_types >> 16) & 0xff;
- sljit_u8 op_mr = (op_types >> 8) & 0xff;
- sljit_u8 op_imm = op_types & 0xff;
-
- if (dst == SLJIT_UNUSED) {
- EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
- if (src2 & SLJIT_IMM) {
- BINARY_IMM(op_imm, op_mr, src2w, TMP_REG1, 0);
- }
- else {
- inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w);
- FAIL_IF(!inst);
- *inst = op_rm;
- }
- return SLJIT_SUCCESS;
- }
+ sljit_u8 op_eax_imm = U8(op_types >> 24);
+ sljit_u8 op_rm = U8((op_types >> 16) & 0xff);
+ sljit_u8 op_mr = U8((op_types >> 8) & 0xff);
+ sljit_u8 op_imm = U8(op_types & 0xff);
if (dst == src1 && dstw == src1w) {
if (src2 & SLJIT_IMM) {
@@ -1812,9 +1918,7 @@ static sljit_s32 emit_mul(struct sljit_compiler *compiler,
sljit_s32 src2, sljit_sw src2w)
{
sljit_u8* inst;
- sljit_s32 dst_r;
-
- dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
+ sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
/* Register destination. */
if (dst_r == src1 && !(src2 & SLJIT_IMM)) {
@@ -1843,7 +1947,7 @@ static sljit_s32 emit_mul(struct sljit_compiler *compiler,
inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
FAIL_IF(!inst);
INC_SIZE(1);
- *inst = (sljit_s8)src1w;
+ *inst = U8(src1w);
}
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
else {
@@ -1886,7 +1990,7 @@ static sljit_s32 emit_mul(struct sljit_compiler *compiler,
inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
FAIL_IF(!inst);
INC_SIZE(1);
- *inst = (sljit_s8)src2w;
+ *inst = U8(src2w);
}
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
else {
@@ -2160,6 +2264,9 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ sljit_s32 mode32;
+#endif
sljit_u8* inst;
if ((src2 & SLJIT_IMM) || (src2 == SLJIT_PREF_SHIFT_REG)) {
@@ -2169,13 +2276,6 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
*inst |= mode;
return SLJIT_SUCCESS;
}
- if (dst == SLJIT_UNUSED) {
- EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
- inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, TMP_REG1, 0);
- FAIL_IF(!inst);
- *inst |= mode;
- return SLJIT_SUCCESS;
- }
if (dst == SLJIT_PREF_SHIFT_REG && src2 == SLJIT_PREF_SHIFT_REG) {
EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
@@ -2206,40 +2306,61 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
FAIL_IF(!inst);
*inst |= mode;
- EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+ return emit_mov(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
}
- else if (SLOW_IS_REG(dst) && dst != src2 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
+
+ if (FAST_IS_REG(dst) && dst != src2 && dst != TMP_REG1 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
if (src1 != dst)
EMIT_MOV(compiler, dst, 0, src1, src1w);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ mode32 = compiler->mode32;
+ compiler->mode32 = 0;
+#endif
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = mode32;
+#endif
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, dst, 0);
FAIL_IF(!inst);
*inst |= mode;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 0;
+#endif
EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = mode32;
+#endif
+ return SLJIT_SUCCESS;
}
- else {
- /* This case is complex since ecx itself may be used for
- addressing, and this case must be supported as well. */
- EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
+
+ /* This case is complex since ecx itself may be used for
+ addressing, and this case must be supported as well. */
+ EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
- EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
- inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
- FAIL_IF(!inst);
- *inst |= mode;
- EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
+ EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
+#else /* !SLJIT_CONFIG_X86_32 */
+ mode32 = compiler->mode32;
+ compiler->mode32 = 0;
+ EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
+ compiler->mode32 = mode32;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
+ inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+ FAIL_IF(!inst);
+ *inst |= mode;
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
#else
- EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
- EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
- inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
- FAIL_IF(!inst);
- *inst |= mode;
- EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
-#endif
- if (dst != SLJIT_UNUSED)
- return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
- }
+ compiler->mode32 = 0;
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
+ compiler->mode32 = mode32;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ if (dst != TMP_REG1)
+ return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
return SLJIT_SUCCESS;
}
@@ -2253,12 +2374,13 @@ static sljit_s32 emit_shift_with_flags(struct sljit_compiler *compiler,
/* The CPU does not set flags if the shift count is 0. */
if (src2 & SLJIT_IMM) {
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- if ((src2w & 0x3f) != 0 || (compiler->mode32 && (src2w & 0x1f) != 0))
- return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w);
-#else
- if ((src2w & 0x1f) != 0)
+ src2w &= compiler->mode32 ? 0x1f : 0x3f;
+#else /* !SLJIT_CONFIG_X86_64 */
+ src2w &= 0x1f;
+#endif /* SLJIT_CONFIG_X86_64 */
+ if (src2w != 0)
return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w);
-#endif
+
if (!set_flags)
return emit_mov(compiler, dst, dstw, src1, src1w);
/* OR dst, src, 0 */
@@ -2275,7 +2397,7 @@ static sljit_s32 emit_shift_with_flags(struct sljit_compiler *compiler,
FAIL_IF(emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w));
if (FAST_IS_REG(dst))
- return emit_cmp_binary(compiler, (dst == SLJIT_UNUSED) ? TMP_REG1 : dst, dstw, SLJIT_IMM, 0);
+ return emit_cmp_binary(compiler, dst, dstw, SLJIT_IMM, 0);
return SLJIT_SUCCESS;
}
@@ -2285,7 +2407,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
sljit_s32 src2, sljit_sw src2w)
{
CHECK_ERROR();
- CHECK(check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
+ CHECK(check_sljit_emit_op2(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w));
ADJUST_LOCAL_OFFSET(dst, dstw);
ADJUST_LOCAL_OFFSET(src1, src1w);
ADJUST_LOCAL_OFFSET(src2, src2w);
@@ -2294,11 +2416,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
CHECK_EXTRA_REGS(src1, src1w, (void)0);
CHECK_EXTRA_REGS(src2, src2w, (void)0);
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- compiler->mode32 = op & SLJIT_I32_OP;
+ compiler->mode32 = op & SLJIT_32;
#endif
- if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
- return SLJIT_SUCCESS;
+ SLJIT_ASSERT(dst != TMP_REG1 || HAS_FLAGS(op));
switch (GET_OPCODE(op)) {
case SLJIT_ADD:
@@ -2312,17 +2433,18 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
return emit_cum_binary(compiler, BINARY_OPCODE(ADC),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SUB:
+ if (src1 == SLJIT_IMM && src1w == 0)
+ return emit_unary(compiler, NEG_rm, dst, dstw, src2, src2w);
+
if (!HAS_FLAGS(op)) {
if ((src2 & SLJIT_IMM) && emit_lea_binary(compiler, dst, dstw, src1, src1w, SLJIT_IMM, -src2w) != SLJIT_ERR_UNSUPPORTED)
return compiler->error;
- if (SLOW_IS_REG(dst) && src2 == dst) {
+ if (FAST_IS_REG(dst) && src2 == dst) {
FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), dst, 0, dst, 0, src1, src1w));
return emit_unary(compiler, NEG_rm, dst, 0, dst, 0);
}
}
- if (dst == SLJIT_UNUSED)
- return emit_cmp_binary(compiler, src1, src1w, src2, src2w);
return emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SUBC:
@@ -2331,8 +2453,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
case SLJIT_MUL:
return emit_mul(compiler, dst, dstw, src1, src1w, src2, src2w);
case SLJIT_AND:
- if (dst == SLJIT_UNUSED)
- return emit_test_binary(compiler, src1, src1w, src2, src2w);
return emit_cum_binary(compiler, BINARY_OPCODE(AND),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_OR:
@@ -2342,19 +2462,173 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
return emit_cum_binary(compiler, BINARY_OPCODE(XOR),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_SHL:
+ case SLJIT_MSHL:
return emit_shift_with_flags(compiler, SHL, HAS_FLAGS(op),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_LSHR:
+ case SLJIT_MLSHR:
return emit_shift_with_flags(compiler, SHR, HAS_FLAGS(op),
dst, dstw, src1, src1w, src2, src2w);
case SLJIT_ASHR:
+ case SLJIT_MASHR:
return emit_shift_with_flags(compiler, SAR, HAS_FLAGS(op),
dst, dstw, src1, src1w, src2, src2w);
+ case SLJIT_ROTL:
+ return emit_shift_with_flags(compiler, ROL, 0,
+ dst, dstw, src1, src1w, src2, src2w);
+ case SLJIT_ROTR:
+ return emit_shift_with_flags(compiler, ROR, 0,
+ dst, dstw, src1, src1w, src2, src2w);
}
return SLJIT_SUCCESS;
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ sljit_s32 opcode = GET_OPCODE(op);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op2(compiler, op, 1, 0, 0, src1, src1w, src2, src2w));
+
+ if (opcode != SLJIT_SUB && opcode != SLJIT_AND) {
+ SLJIT_SKIP_CHECKS(compiler);
+ return sljit_emit_op2(compiler, op, TMP_REG1, 0, src1, src1w, src2, src2w);
+ }
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+ ADJUST_LOCAL_OFFSET(src2, src2w);
+
+ CHECK_EXTRA_REGS(src1, src1w, (void)0);
+ CHECK_EXTRA_REGS(src2, src2w, (void)0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = op & SLJIT_32;
+#endif
+
+ if (opcode == SLJIT_SUB) {
+ return emit_cmp_binary(compiler, src1, src1w, src2, src2w);
+ }
+ return emit_test_binary(compiler, src1, src1w, src2, src2w);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src_dst,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2, sljit_sw src2w)
+{
+ sljit_s32 restore_ecx = 0;
+ sljit_s32 is_rotate, is_left;
+ sljit_u8* inst;
+ sljit_sw dstw = 0;
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ sljit_s32 tmp2 = SLJIT_MEM1(SLJIT_SP);
+#else /* !SLJIT_CONFIG_X86_32 */
+ sljit_s32 tmp2 = TMP_REG2;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w));
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+ ADJUST_LOCAL_OFFSET(src2, src2w);
+
+ CHECK_EXTRA_REGS(src1, src1w, (void)0);
+ CHECK_EXTRA_REGS(src2, src2w, (void)0);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = op & SLJIT_32;
+#endif
+
+ if (src2 & SLJIT_IMM) {
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+ src2w &= 0x1f;
+#else /* !SLJIT_CONFIG_X86_32 */
+ src2w &= (op & SLJIT_32) ? 0x1f : 0x3f;
+#endif /* SLJIT_CONFIG_X86_32 */
+
+ if (src2w == 0)
+ return SLJIT_SUCCESS;
+ }
+
+ is_left = (GET_OPCODE(op) == SLJIT_SHL || GET_OPCODE(op) == SLJIT_MSHL);
+
+ is_rotate = (src_dst == src1);
+ CHECK_EXTRA_REGS(src_dst, dstw, (void)0);
+
+ if (is_rotate)
+ return emit_shift(compiler, is_left ? ROL : ROR, src_dst, dstw, src1, src1w, src2, src2w);
+
+ if ((src2 & SLJIT_IMM) || src2 == SLJIT_PREF_SHIFT_REG) {
+ if (!FAST_IS_REG(src1)) {
+ EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
+ src1 = TMP_REG1;
+ }
+ } else if (FAST_IS_REG(src1)) {
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 0;
+#endif
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = op & SLJIT_32;
+#endif
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
+
+ if (src1 == SLJIT_PREF_SHIFT_REG)
+ src1 = TMP_REG1;
+
+ if (src_dst == SLJIT_PREF_SHIFT_REG)
+ src_dst = TMP_REG1;
+
+ restore_ecx = 1;
+ } else {
+ EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 0;
+#endif
+ EMIT_MOV(compiler, tmp2, 0, SLJIT_PREF_SHIFT_REG, 0);
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = op & SLJIT_32;
+#endif
+ EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
+
+ src1 = TMP_REG1;
+
+ if (src_dst == SLJIT_PREF_SHIFT_REG) {
+ src_dst = tmp2;
+ SLJIT_ASSERT(dstw == 0);
+ }
+
+ restore_ecx = 2;
+ }
+
+ inst = emit_x86_instruction(compiler, 2, src1, 0, src_dst, dstw);
+ FAIL_IF(!inst);
+ inst[0] = GROUP_0F;
+
+ if (src2 & SLJIT_IMM) {
+ inst[1] = U8((is_left ? SHLD : SHRD) - 1);
+
+ /* Immedate argument is added separately. */
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
+ FAIL_IF(!inst);
+ INC_SIZE(1);
+ *inst = U8(src2w);
+ } else
+ inst[1] = U8(is_left ? SHLD : SHRD);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+ compiler->mode32 = 0;
+#endif
+
+ if (restore_ecx == 1)
+ return emit_mov(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+ if (restore_ecx == 2)
+ return emit_mov(compiler, SLJIT_PREF_SHIFT_REG, 0, tmp2, 0);
+
+ return SLJIT_SUCCESS;
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src, sljit_sw srcw)
{
@@ -2371,7 +2645,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp
/* Don't adjust shadow stack if it isn't enabled. */
if (!cpu_has_shadow_stack ())
return SLJIT_SUCCESS;
- return adjust_shadow_stack(compiler, src, srcw, SLJIT_UNUSED, 0);
+ return adjust_shadow_stack(compiler, src, srcw);
case SLJIT_PREFETCH_L1:
case SLJIT_PREFETCH_L2:
case SLJIT_PREFETCH_L3:
@@ -2403,7 +2677,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
- void *instruction, sljit_s32 size)
+ void *instruction, sljit_u32 size)
{
sljit_u8 *inst;
@@ -2422,13 +2696,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *c
/* --------------------------------------------------------------------- */
/* Alignment(3) + 4 * 16 bytes. */
-static sljit_s32 sse2_data[3 + (4 * 4)];
-static sljit_s32 *sse2_buffer;
+static sljit_u32 sse2_data[3 + (4 * 4)];
+static sljit_u32 *sse2_buffer;
static void init_compiler(void)
{
/* Align to 16 bytes. */
- sse2_buffer = (sljit_s32*)(((sljit_uw)sse2_data + 15) & ~0xf);
+ sse2_buffer = (sljit_u32*)(((sljit_uw)sse2_data + 15) & ~(sljit_uw)0xf);
/* Single precision constants (each constant is 16 byte long). */
sse2_buffer[0] = 0x80000000;
@@ -2488,7 +2762,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
compiler->mode32 = 0;
#endif
- inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_F32_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP2, dst_r, 0, src, srcw);
+ inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_32) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP2, dst_r, 0, src, srcw);
FAIL_IF(!inst);
*inst++ = GROUP_0F;
*inst = CVTTSD2SI_r_xm;
@@ -2520,7 +2794,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
srcw = 0;
}
- inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_F32_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP1, dst_r, 0, src, srcw);
+ inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_32) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP1, dst_r, 0, src, srcw);
FAIL_IF(!inst);
*inst++ = GROUP_0F;
*inst = CVTSI2SD_x_rm;
@@ -2529,7 +2803,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_comp
compiler->mode32 = 1;
#endif
if (dst_r == TMP_FREG)
- return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
return SLJIT_SUCCESS;
}
@@ -2537,12 +2811,25 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
{
+ switch (GET_FLAG_TYPE(op)) {
+ case SLJIT_ORDERED_LESS:
+ case SLJIT_UNORDERED_OR_GREATER_EQUAL:
+ case SLJIT_UNORDERED_OR_GREATER:
+ case SLJIT_ORDERED_LESS_EQUAL:
+ if (!FAST_IS_REG(src2)) {
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src2, src2w));
+ src2 = TMP_FREG;
+ }
+
+ return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_32), src2, src1, src1w);
+ }
+
if (!FAST_IS_REG(src1)) {
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src1, src1w));
src1 = TMP_FREG;
}
- return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_F32_OP), src1, src2, src2w);
+ return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_32), src1, src2, src2w);
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
@@ -2560,11 +2847,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
if (GET_OPCODE(op) == SLJIT_MOV_F64) {
if (FAST_IS_REG(dst))
- return emit_sse2_load(compiler, op & SLJIT_F32_OP, dst, src, srcw);
+ return emit_sse2_load(compiler, op & SLJIT_32, dst, src, srcw);
if (FAST_IS_REG(src))
- return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, src);
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src, srcw));
- return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, src);
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src, srcw));
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
}
if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) {
@@ -2573,41 +2860,41 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
/* We overwrite the high bits of source. From SLJIT point of view,
this is not an issue.
Note: In SSE3, we could also use MOVDDUP and MOVSLDUP. */
- FAIL_IF(emit_sse2_logic(compiler, UNPCKLPD_x_xm, op & SLJIT_F32_OP, src, src, 0));
+ FAIL_IF(emit_sse2_logic(compiler, UNPCKLPD_x_xm, op & SLJIT_32, src, src, 0));
}
else {
- FAIL_IF(emit_sse2_load(compiler, !(op & SLJIT_F32_OP), TMP_FREG, src, srcw));
+ FAIL_IF(emit_sse2_load(compiler, !(op & SLJIT_32), TMP_FREG, src, srcw));
src = TMP_FREG;
}
- FAIL_IF(emit_sse2_logic(compiler, CVTPD2PS_x_xm, op & SLJIT_F32_OP, dst_r, src, 0));
+ FAIL_IF(emit_sse2_logic(compiler, CVTPD2PS_x_xm, op & SLJIT_32, dst_r, src, 0));
if (dst_r == TMP_FREG)
- return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
return SLJIT_SUCCESS;
}
if (FAST_IS_REG(dst)) {
dst_r = dst;
if (dst != src)
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, dst_r, src, srcw));
}
else {
dst_r = TMP_FREG;
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, dst_r, src, srcw));
}
switch (GET_OPCODE(op)) {
case SLJIT_NEG_F64:
- FAIL_IF(emit_sse2_logic(compiler, XORPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_F32_OP ? sse2_buffer : sse2_buffer + 8)));
+ FAIL_IF(emit_sse2_logic(compiler, XORPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_32 ? sse2_buffer : sse2_buffer + 8)));
break;
case SLJIT_ABS_F64:
- FAIL_IF(emit_sse2_logic(compiler, ANDPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_F32_OP ? sse2_buffer + 4 : sse2_buffer + 12)));
+ FAIL_IF(emit_sse2_logic(compiler, ANDPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_32 ? sse2_buffer + 4 : sse2_buffer + 12)));
break;
}
if (dst_r == TMP_FREG)
- return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
return SLJIT_SUCCESS;
}
@@ -2638,37 +2925,37 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
src2w = src1w;
}
else if (dst != src2)
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src1, src1w));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, dst_r, src1, src1w));
else {
dst_r = TMP_FREG;
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src1, src1w));
}
}
else {
dst_r = TMP_FREG;
- FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
+ FAIL_IF(emit_sse2_load(compiler, op & SLJIT_32, TMP_FREG, src1, src1w));
}
switch (GET_OPCODE(op)) {
case SLJIT_ADD_F64:
- FAIL_IF(emit_sse2(compiler, ADDSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
+ FAIL_IF(emit_sse2(compiler, ADDSD_x_xm, op & SLJIT_32, dst_r, src2, src2w));
break;
case SLJIT_SUB_F64:
- FAIL_IF(emit_sse2(compiler, SUBSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
+ FAIL_IF(emit_sse2(compiler, SUBSD_x_xm, op & SLJIT_32, dst_r, src2, src2w));
break;
case SLJIT_MUL_F64:
- FAIL_IF(emit_sse2(compiler, MULSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
+ FAIL_IF(emit_sse2(compiler, MULSD_x_xm, op & SLJIT_32, dst_r, src2, src2w));
break;
case SLJIT_DIV_F64:
- FAIL_IF(emit_sse2(compiler, DIVSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
+ FAIL_IF(emit_sse2(compiler, DIVSD_x_xm, op & SLJIT_32, dst_r, src2, src2w));
break;
}
if (dst_r == TMP_FREG)
- return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
+ return emit_sse2_store(compiler, op & SLJIT_32, dst, dstw, TMP_FREG);
return SLJIT_SUCCESS;
}
@@ -2710,7 +2997,7 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compile
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
PTR_FAIL_IF_NULL(jump);
- set_jump(jump, compiler, (type & SLJIT_REWRITABLE_JUMP) | ((type & 0xff) << TYPE_SHIFT));
+ set_jump(jump, compiler, (sljit_u32)((type & SLJIT_REWRITABLE_JUMP) | ((type & 0xff) << TYPE_SHIFT)));
type &= 0xff;
/* Worst case size. */
@@ -2742,8 +3029,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
if (src == SLJIT_IMM) {
jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
FAIL_IF_NULL(jump);
- set_jump(jump, compiler, JUMP_ADDR | (type << TYPE_SHIFT));
- jump->u.target = srcw;
+ set_jump(jump, compiler, (sljit_u32)(JUMP_ADDR | (type << TYPE_SHIFT)));
+ jump->u.target = (sljit_uw)srcw;
/* Worst case size. */
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
@@ -2766,7 +3053,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
FAIL_IF(!inst);
*inst++ = GROUP_FF;
- *inst |= (type >= SLJIT_FAST_CALL) ? CALL_rm : JMP_rm;
+ *inst = U8(*inst | ((type >= SLJIT_FAST_CALL) ? CALL_rm : JMP_rm));
}
return SLJIT_SUCCESS;
}
@@ -2790,9 +3077,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
ADJUST_LOCAL_OFFSET(dst, dstw);
CHECK_EXTRA_REGS(dst, dstw, (void)0);
- type &= 0xff;
/* setcc = jcc + 0x10. */
- cond_set = get_jump_code(type) + 0x10;
+ cond_set = U8(get_jump_code((sljit_uw)type) + 0x10);
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && FAST_IS_REG(dst)) {
@@ -2804,9 +3090,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
*inst++ = GROUP_0F;
*inst++ = cond_set;
*inst++ = MOD_REG | reg_lmap[TMP_REG1];
- *inst++ = REX | (reg_map[TMP_REG1] <= 7 ? 0 : REX_R) | (reg_map[dst] <= 7 ? 0 : REX_B);
+ *inst++ = U8(REX | (reg_map[TMP_REG1] <= 7 ? 0 : REX_R) | (reg_map[dst] <= 7 ? 0 : REX_B));
*inst++ = OR_rm8_r8;
- *inst++ = MOD_REG | (reg_lmap[TMP_REG1] << 3) | reg_lmap[dst];
+ *inst++ = U8(MOD_REG | (reg_lmap[TMP_REG1] << 3) | reg_lmap[dst]);
return SLJIT_SUCCESS;
}
@@ -2824,7 +3110,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
/* The movzx instruction does not affect flags. */
*inst++ = GROUP_0F;
*inst++ = MOVZX_r_rm8;
- *inst = MOD_REG | (reg_lmap[reg] << 3) | reg_lmap[reg];
+ *inst = U8(MOD_REG | (reg_lmap[reg] << 3) | reg_lmap[reg]);
if (reg != TMP_REG1)
return SLJIT_SUCCESS;
@@ -2834,10 +3120,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
}
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0);
#else
@@ -2851,19 +3134,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
/* Set low byte to conditional flag. */
*inst++ = GROUP_0F;
*inst++ = cond_set;
- *inst++ = MOD_REG | reg_map[dst];
+ *inst++ = U8(MOD_REG | reg_map[dst]);
*inst++ = GROUP_0F;
*inst++ = MOVZX_r_rm8;
- *inst = MOD_REG | (reg_map[dst] << 3) | reg_map[dst];
+ *inst = U8(MOD_REG | (reg_map[dst] << 3) | reg_map[dst]);
return SLJIT_SUCCESS;
}
/* Low byte is not accessible. */
- if (cpu_has_cmov == -1)
+ if (cpu_feature_list == 0)
get_cpu_features();
- if (cpu_has_cmov) {
+ if (cpu_feature_list & CPU_FEATURE_CMOV) {
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 1);
/* a xor reg, reg operation would overwrite the flags. */
EMIT_MOV(compiler, dst, 0, SLJIT_IMM, 0);
@@ -2874,15 +3157,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
*inst++ = GROUP_0F;
/* cmovcc = setcc - 0x50. */
- *inst++ = cond_set - 0x50;
- *inst++ = MOD_REG | (reg_map[dst] << 3) | reg_map[TMP_REG1];
+ *inst++ = U8(cond_set - 0x50);
+ *inst++ = U8(MOD_REG | (reg_map[dst] << 3) | reg_map[TMP_REG1]);
return SLJIT_SUCCESS;
}
inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 3 + 3 + 1);
FAIL_IF(!inst);
INC_SIZE(1 + 3 + 3 + 1);
- *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
+ *inst++ = U8(XCHG_EAX_r | reg_map[TMP_REG1]);
/* Set al to conditional flag. */
*inst++ = GROUP_0F;
*inst++ = cond_set;
@@ -2890,8 +3173,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
*inst++ = GROUP_0F;
*inst++ = MOVZX_r_rm8;
- *inst++ = MOD_REG | (reg_map[dst] << 3) | 0 /* eax */;
- *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
+ *inst++ = U8(MOD_REG | (reg_map[dst] << 3) | 0 /* eax */);
+ *inst++ = U8(XCHG_EAX_r | reg_map[TMP_REG1]);
return SLJIT_SUCCESS;
}
@@ -2903,13 +3186,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
FAIL_IF(!inst);
INC_SIZE(1 + 3 + 2 + 1);
/* Set low register to conditional flag. */
- *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
+ *inst++ = U8(XCHG_EAX_r | reg_map[TMP_REG1]);
*inst++ = GROUP_0F;
*inst++ = cond_set;
*inst++ = MOD_REG | 0 /* eax */;
*inst++ = OR_rm8_r8;
*inst++ = MOD_REG | (0 /* eax */ << 3) | reg_map[dst];
- *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
+ *inst++ = U8(XCHG_EAX_r | reg_map[TMP_REG1]);
}
else {
inst = (sljit_u8*)ensure_buf(compiler, 1 + 2 + 3 + 2 + 2);
@@ -2917,14 +3200,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
INC_SIZE(2 + 3 + 2 + 2);
/* Set low register to conditional flag. */
*inst++ = XCHG_r_rm;
- *inst++ = MOD_REG | (1 /* ecx */ << 3) | reg_map[TMP_REG1];
+ *inst++ = U8(MOD_REG | (1 /* ecx */ << 3) | reg_map[TMP_REG1]);
*inst++ = GROUP_0F;
*inst++ = cond_set;
*inst++ = MOD_REG | 1 /* ecx */;
*inst++ = OR_rm8_r8;
*inst++ = MOD_REG | (1 /* ecx */ << 3) | 0 /* eax */;
*inst++ = XCHG_r_rm;
- *inst++ = MOD_REG | (1 /* ecx */ << 3) | reg_map[TMP_REG1];
+ *inst++ = U8(MOD_REG | (1 /* ecx */ << 3) | reg_map[TMP_REG1]);
}
return SLJIT_SUCCESS;
}
@@ -2933,7 +3216,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 3 + 3 + 1);
FAIL_IF(!inst);
INC_SIZE(1 + 3 + 3 + 1);
- *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
+ *inst++ = U8(XCHG_EAX_r | reg_map[TMP_REG1]);
/* Set al to conditional flag. */
*inst++ = GROUP_0F;
*inst++ = cond_set;
@@ -2943,15 +3226,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
*inst++ = MOVZX_r_rm8;
*inst++ = MOD_REG | (0 << 3) /* eax */ | 0 /* eax */;
- *inst++ = XCHG_EAX_r + reg_map[TMP_REG1];
+ *inst++ = U8(XCHG_EAX_r | reg_map[TMP_REG1]);
if (GET_OPCODE(op) < SLJIT_ADD)
return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
-#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
- || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
- compiler->skip_checks = 1;
-#endif
+ SLJIT_SKIP_CHECKS(compiler);
return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0);
#endif /* SLJIT_CONFIG_X86_64 */
}
@@ -2966,7 +3246,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- dst_reg &= ~SLJIT_I32_OP;
+ type &= ~SLJIT_32;
if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV) || (dst_reg >= SLJIT_R3 && dst_reg <= SLJIT_S3))
return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
@@ -2979,8 +3259,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
CHECK_EXTRA_REGS(src, srcw, (void)0);
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
- compiler->mode32 = dst_reg & SLJIT_I32_OP;
- dst_reg &= ~SLJIT_I32_OP;
+ compiler->mode32 = type & SLJIT_32;
+ type &= ~SLJIT_32;
#endif
if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
@@ -2992,7 +3272,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
inst = emit_x86_instruction(compiler, 2, dst_reg, 0, src, srcw);
FAIL_IF(!inst);
*inst++ = GROUP_0F;
- *inst = get_jump_code(type & 0xff) - 0x40;
+ *inst = U8(get_jump_code((sljit_uw)type) - 0x40);
return SLJIT_SUCCESS;
}
@@ -3125,9 +3405,9 @@ SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_ta
SLJIT_UPDATE_WX_FLAGS((void*)addr, (void*)(addr + sizeof(sljit_uw)), 0);
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
- sljit_unaligned_store_sw((void*)addr, new_target - (addr + 4) - (sljit_uw)executable_offset);
+ sljit_unaligned_store_sw((void*)addr, (sljit_sw)(new_target - (addr + 4) - (sljit_uw)executable_offset));
#else
- sljit_unaligned_store_sw((void*)addr, (sljit_sw) new_target);
+ sljit_unaligned_store_sw((void*)addr, (sljit_sw)new_target);
#endif
SLJIT_UPDATE_WX_FLAGS((void*)addr, (void*)(addr + sizeof(sljit_uw)), 1);
}
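
Note (not part of the patch): the rewritten get_cpu_features() above detects LZCNT/TZCNT support at run time by executing the instruction on a zero operand and testing ZF, instead of relying solely on CPUID. Below is a minimal standalone sketch of that LZCNT probe, assuming GCC/Clang inline assembly on x86/x86-64; the helper name has_lzcnt() is illustrative only and does not exist in sljit.

#include <stdio.h>

/* Illustrative only: mirrors the probe added to get_cpu_features().
   On a CPU without LZCNT the F3-prefixed opcode decodes as BSR; BSR of a
   zero source sets ZF, so SETNZ stores 0. A real LZCNT of 0 returns the
   operand width (non-zero) and clears ZF, so SETNZ stores 1. */
static int has_lzcnt(void)
{
	unsigned int value;

	__asm__ (
		"movl $0x0, %%eax\n"
		"lzcnt %%eax, %%eax\n"
		"setnz %%al\n"
		"movl %%eax, %0\n"
		: "=g" (value)
		:
#if defined(__x86_64__)
		: "rax"
#endif
#if !defined(__x86_64__)
		: "eax"
#endif
	);

	return value & 0x1;
}

int main(void)
{
	printf("lzcnt supported: %d\n", has_lzcnt());
	return 0;
}

In the patch, an analogous block probes TZCNT, and both results are folded into the cpu_feature_list bitmask (CPU_FEATURE_LZCNT, CPU_FEATURE_TZCNT, CPU_FEATURE_CMOV, ...) together with CPU_FEATURE_DETECTED, so detection runs only once and emit_clz_ctz() can fall back to BSR/BSF plus CMOV when the newer instructions are absent.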