Diffstat (limited to 'src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c')
-rw-r--r--  src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c  1303
1 file changed, 1149 insertions(+), 154 deletions(-)
diff --git a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c
index 7d6bac077e..c27c50ddb3 100644
--- a/src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c
+++ b/src/3rdparty/pcre2/src/sljit/sljitNativeARM_T2_32.c
@@ -49,8 +49,20 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
0, 0, 1, 2, 3, 11, 10, 9, 8, 7, 6, 5, 4, 13, 12, 14, 15
};
-static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
- 0, 0, 1, 2, 3, 4, 5, 15, 14, 13, 12, 11, 10, 9, 8, 6, 7
+static const sljit_u8 freg_map[((SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2) << 1) + 1] = {
+ 0,
+ 0, 1, 2, 3, 4, 5, 15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6,
+ 0, 1, 2, 3, 4, 5, 15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6
+};
+
+static const sljit_u8 freg_ebit_map[((SLJIT_NUMBER_OF_FLOAT_REGISTERS + 2) << 1) + 1] = {
+ 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1
};
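Note: the float register map doubled in length because every SLJIT f64 register is now also addressable as a pair of f32 halves (SLJIT_HAS_F64_AS_F32_PAIR below). A VFP single-precision register s0..s31 is encoded as a 4-bit field plus one extra bit; freg_map keeps the 4-bit part and freg_ebit_map the extra bit (0 for the first bank of entries, 1 for the second). A minimal sketch of the split, assuming the usual Sd = (Vd << 1) | D numbering:

	/* Illustrative only: how an s-register number feeds the two maps
	   that the VM4/VD4/VN4 macros below combine into one field. */
	unsigned int sreg   = 17;          /* s17 */
	unsigned int field4 = sreg >> 1;   /* 4-bit part  -> freg_map      */
	unsigned int ebit   = sreg & 0x1;  /* extra D bit -> freg_ebit_map */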
#define COPY_BITS(src, from, to, bits) \
@@ -75,13 +87,15 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
(reg_map[reg1] <= 7 && reg_map[reg2] <= 7 && reg_map[reg3] <= 7)
/* Thumb32 encodings. */
-#define RD4(rd) ((sljit_ins)reg_map[rd] << 8)
-#define RN4(rn) ((sljit_ins)reg_map[rn] << 16)
#define RM4(rm) ((sljit_ins)reg_map[rm])
+#define RD4(rd) ((sljit_ins)reg_map[rd] << 8)
#define RT4(rt) ((sljit_ins)reg_map[rt] << 12)
-#define DD4(dd) ((sljit_ins)freg_map[dd] << 12)
-#define DN4(dn) ((sljit_ins)freg_map[dn] << 16)
-#define DM4(dm) ((sljit_ins)freg_map[dm])
+#define RN4(rn) ((sljit_ins)reg_map[rn] << 16)
+
+#define VM4(vm) (((sljit_ins)freg_map[vm]) | ((sljit_ins)freg_ebit_map[vm] << 5))
+#define VD4(vd) (((sljit_ins)freg_map[vd] << 12) | ((sljit_ins)freg_ebit_map[vd] << 22))
+#define VN4(vn) (((sljit_ins)freg_map[vn] << 16) | ((sljit_ins)freg_ebit_map[vn] << 7))
+
#define IMM5(imm) \
(COPY_BITS(imm, 2, 12, 3) | (((sljit_ins)imm & 0x3) << 6))
#define IMM12(imm) \
@@ -128,9 +142,12 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define EORS 0x4040
#define EOR_W 0xea800000
#define IT 0xbf00
-#define LDR_SP 0x9800
#define LDR 0xf8d00000
+#define LDR_SP 0x9800
#define LDRD 0xe9500000
+#define LDREX 0xe8500f00
+#define LDREXB 0xe8d00f4f
+#define LDREXH 0xe8d00f5f
#define LDRI 0xf8500800
#define LSLS 0x4080
#define LSLSI 0x0000
@@ -160,6 +177,10 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define POP_W 0xe8bd0000
#define PUSH 0xb400
#define PUSH_W 0xe92d0000
+#define REV 0xba00
+#define REV_W 0xfa90f080
+#define REV16 0xba40
+#define REV16_W 0xfa90f090
#define RBIT 0xfa90f0a0
#define RORS 0x41c0
#define ROR_W 0xfa60f000
@@ -171,8 +192,11 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define SBC_W 0xeb600000
#define SDIV 0xfb90f0f0
#define SMULL 0xfb800000
-#define STRD 0xe9400000
#define STR_SP 0x9000
+#define STRD 0xe9400000
+#define STREX 0xe8400000
+#define STREXB 0xe8c00f40
+#define STREXH 0xe8c00f50
#define SUBS 0x1a00
#define SUBSI3 0x1e00
#define SUBSI8 0x3800
@@ -195,23 +219,57 @@ static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 3] = {
#define UXTH_W 0xfa1ff080
#define VABS_F32 0xeeb00ac0
#define VADD_F32 0xee300a00
+#define VAND 0xef000110
#define VCMP_F32 0xeeb40a40
#define VCVT_F32_S32 0xeeb80ac0
+#define VCVT_F32_U32 0xeeb80a40
#define VCVT_F64_F32 0xeeb70ac0
#define VCVT_S32_F32 0xeebd0ac0
#define VDIV_F32 0xee800a00
+#define VDUP 0xee800b10
+#define VDUP_s 0xffb00c00
+#define VEOR 0xff000110
+#define VLD1 0xf9200000
+#define VLD1_r 0xf9a00c00
+#define VLD1_s 0xf9a00000
#define VLDR_F32 0xed100a00
#define VMOV_F32 0xeeb00a40
#define VMOV 0xee000a10
#define VMOV2 0xec400a10
+#define VMOV_i 0xef800010
+#define VMOV_s 0xee000b10
+#define VMOVN 0xffb20200
#define VMRS 0xeef1fa10
#define VMUL_F32 0xee200a00
#define VNEG_F32 0xeeb10a40
+#define VORR 0xef200110
#define VPOP 0xecbd0b00
#define VPUSH 0xed2d0b00
+#define VSHLL 0xef800a10
+#define VSHR 0xef800010
+#define VSRA 0xef800110
+#define VST1 0xf9000000
+#define VST1_s 0xf9800000
#define VSTR_F32 0xed000a00
#define VSUB_F32 0xee300a40
+#if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
+
+static sljit_s32 function_check_is_freg(struct sljit_compiler *compiler, sljit_s32 fr, sljit_s32 is_32)
+{
+ if (compiler->scratches == -1)
+ return 0;
+
+ if (is_32 && fr >= SLJIT_F64_SECOND(SLJIT_FR0))
+ fr -= SLJIT_F64_SECOND(0);
+
+ return (fr >= SLJIT_FR0 && fr < (SLJIT_FR0 + compiler->fscratches))
+ || (fr > (SLJIT_FS0 - compiler->fsaveds) && fr <= SLJIT_FS0)
+ || (fr >= SLJIT_TMP_FREGISTER_BASE && fr < (SLJIT_TMP_FREGISTER_BASE + SLJIT_NUMBER_OF_TEMPORARY_FLOAT_REGISTERS));
+}
+
+#endif /* SLJIT_ARGUMENT_CHECKS */
+
static sljit_s32 push_inst16(struct sljit_compiler *compiler, sljit_ins inst)
{
sljit_u16 *ptr;
@@ -488,18 +546,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
{
switch (feature_type) {
case SLJIT_HAS_FPU:
+ case SLJIT_HAS_F64_AS_F32_PAIR:
+ case SLJIT_HAS_SIMD:
#ifdef SLJIT_IS_FPU_AVAILABLE
- return SLJIT_IS_FPU_AVAILABLE;
+ return (SLJIT_IS_FPU_AVAILABLE) != 0;
#else
/* Available by default. */
return 1;
#endif
+ case SLJIT_SIMD_REGS_ARE_PAIRS:
case SLJIT_HAS_CLZ:
case SLJIT_HAS_CTZ:
+ case SLJIT_HAS_REV:
case SLJIT_HAS_ROT:
case SLJIT_HAS_CMOV:
case SLJIT_HAS_PREFETCH:
+ case SLJIT_HAS_COPY_F32:
+ case SLJIT_HAS_COPY_F64:
+ case SLJIT_HAS_ATOMIC:
return 1;
default:
@@ -615,18 +680,17 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
switch (flags & 0xffff) {
case SLJIT_CLZ:
case SLJIT_CTZ:
+ case SLJIT_REV:
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
case SLJIT_MUL:
/* No form with immediate operand. */
break;
case SLJIT_MOV:
SLJIT_ASSERT(!(flags & SET_FLAGS) && (flags & ARG2_IMM) && arg1 == TMP_REG2);
return load_immediate(compiler, dst, imm);
- case SLJIT_NOT:
- if (!(flags & SET_FLAGS))
- return load_immediate(compiler, dst, ~imm);
- /* Since the flags should be set, we just fallback to the register mode.
- Although some clever things could be done here, "NOT IMM" does not worth the efforts. */
- break;
case SLJIT_ADD:
compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
imm2 = NEGATE(imm);
@@ -657,9 +721,14 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
break;
case SLJIT_ADDC:
compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
- imm = get_imm(imm);
- if (imm != INVALID_IMM)
- return push_inst32(compiler, ADCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
+ imm2 = get_imm(imm);
+ if (imm2 != INVALID_IMM)
+ return push_inst32(compiler, ADCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm2);
+ if (flags & ARG2_IMM) {
+ imm = get_imm(~imm);
+ if (imm != INVALID_IMM)
+ return push_inst32(compiler, SBCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
+ }
break;
case SLJIT_SUB:
compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
@@ -712,9 +781,12 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
compiler->status_flags_state = SLJIT_CURRENT_FLAGS_SUB;
if (flags & ARG1_IMM)
break;
- imm = get_imm(imm);
+ imm2 = get_imm(imm);
+ if (imm2 != INVALID_IMM)
+ return push_inst32(compiler, SBCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm2);
+ imm = get_imm(~imm);
if (imm != INVALID_IMM)
- return push_inst32(compiler, SBCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
+ return push_inst32(compiler, ADCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
break;
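Note: the reworked ADDC/SUBC immediate paths exploit the carry identity SBC rd, rn, op2 = rn + ~op2 + C, so ADC with imm and SBC with ~imm compute the same value (and vice versa); whichever of the two constants fits the Thumb-2 modified-immediate encoding can be emitted. A sketch of the combined test, reusing this file's get_imm()/INVALID_IMM (the helper name is an illustration, not from the source):

	static int carry_op_imm_fits(sljit_uw imm)
	{
		return get_imm(imm) != INVALID_IMM || get_imm(~imm) != INVALID_IMM;
	}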
case SLJIT_AND:
imm2 = get_imm(imm);
@@ -733,6 +805,11 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
return push_inst32(compiler, ORNI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
break;
case SLJIT_XOR:
+ if (imm == (sljit_uw)-1) {
+ if (IS_2_LO_REGS(dst, reg))
+ return push_inst16(compiler, MVNS | RD3(dst) | RN3(reg));
+ return push_inst32(compiler, MVN_W | (flags & SET_FLAGS) | RD4(dst) | RM4(reg));
+ }
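Note: the new XOR special case uses the identity x ^ 0xffffffff == ~x, turning an all-ones immediate into a single MVN instead of materializing the constant first. A one-line model:

	/* Model of the rewrite, for a 32-bit sljit_uw. */
	static sljit_uw model_xor_all_ones(sljit_uw x)
	{
		return x ^ (sljit_uw)-1; /* identical to ~x, i.e. MVN */
	}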
imm = get_imm(imm);
if (imm != INVALID_IMM)
return push_inst32(compiler, EORI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm);
@@ -788,8 +865,7 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
imm = arg2;
arg2 = (arg1 == TMP_REG1) ? TMP_REG2 : TMP_REG1;
FAIL_IF(load_immediate(compiler, (sljit_s32)arg2, imm));
- }
- else {
+ } else {
imm = arg1;
arg1 = (arg2 == TMP_REG1) ? TMP_REG2 : TMP_REG1;
FAIL_IF(load_immediate(compiler, (sljit_s32)arg1, imm));
@@ -829,11 +905,6 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
if (IS_2_LO_REGS(dst, arg2))
return push_inst16(compiler, SXTH | RD3(dst) | RN3(arg2));
return push_inst32(compiler, SXTH_W | RD4(dst) | RM4(arg2));
- case SLJIT_NOT:
- SLJIT_ASSERT(arg1 == TMP_REG2);
- if (IS_2_LO_REGS(dst, arg2))
- return push_inst16(compiler, MVNS | RD3(dst) | RN3(arg2));
- return push_inst32(compiler, MVN_W | (flags & SET_FLAGS) | RD4(dst) | RM4(arg2));
case SLJIT_CLZ:
SLJIT_ASSERT(arg1 == TMP_REG2);
return push_inst32(compiler, CLZ | RN4(arg2) | RD4(dst) | RM4(arg2));
@@ -841,6 +912,29 @@ static sljit_s32 emit_op_imm(struct sljit_compiler *compiler, sljit_s32 flags, s
SLJIT_ASSERT(arg1 == TMP_REG2);
FAIL_IF(push_inst32(compiler, RBIT | RN4(arg2) | RD4(dst) | RM4(arg2)));
return push_inst32(compiler, CLZ | RN4(dst) | RD4(dst) | RM4(dst));
+ case SLJIT_REV:
+ case SLJIT_REV_U32:
+ case SLJIT_REV_S32:
+ SLJIT_ASSERT(arg1 == TMP_REG2);
+ if (IS_2_LO_REGS(dst, arg2))
+ return push_inst16(compiler, REV | RD3(dst) | RN3(arg2));
+ return push_inst32(compiler, REV_W | RN4(arg2) | RD4(dst) | RM4(arg2));
+ case SLJIT_REV_U16:
+ case SLJIT_REV_S16:
+ SLJIT_ASSERT(arg1 == TMP_REG2 && dst != TMP_REG2);
+
+ flags &= 0xffff;
+ if (IS_2_LO_REGS(dst, arg2))
+ FAIL_IF(push_inst16(compiler, REV16 | RD3(dst) | RN3(arg2)));
+ else
+ FAIL_IF(push_inst32(compiler, REV16_W | RN4(arg2) | RD4(dst) | RM4(arg2)));
+
+ if (dst == TMP_REG1 || (arg2 == TMP_REG1 && flags == SLJIT_REV_U16))
+ return SLJIT_SUCCESS;
+
+ if (reg_map[dst] <= 7)
+ return push_inst16(compiler, (flags == SLJIT_REV_U16 ? UXTH : SXTH) | RD3(dst) | RN3(dst));
+ return push_inst32(compiler, (flags == SLJIT_REV_U16 ? UXTH_W : SXTH_W) | RD4(dst) | RM4(dst));
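Note: REV16 swaps the two bytes inside each 16-bit half of the word, so the 16-bit reverse opcodes still need a trailing UXTH/SXTH to zero- or sign-extend the low half, except when the upper bits cannot be observed (the early-return cases above). A C model of the instruction:

	static sljit_u32 model_rev16(sljit_u32 x)
	{
		/* e.g. 0xAABB1234 -> 0xBBAA3412 */
		return ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);
	}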
case SLJIT_ADD:
compiler->status_flags_state = SLJIT_CURRENT_FLAGS_ADD;
if (IS_3_LO_REGS(dst, arg1, arg2))
@@ -1176,12 +1270,12 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
}
if (fsaveds + fscratches >= SLJIT_NUMBER_OF_FLOAT_REGISTERS) {
- FAIL_IF(push_inst32(compiler, VPUSH | DD4(SLJIT_FS0) | ((sljit_uw)SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS << 1)));
+ FAIL_IF(push_inst32(compiler, VPUSH | VD4(SLJIT_FS0) | ((sljit_uw)SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS << 1)));
} else {
if (fsaveds > 0)
- FAIL_IF(push_inst32(compiler, VPUSH | DD4(SLJIT_FS0) | ((sljit_uw)fsaveds << 1)));
+ FAIL_IF(push_inst32(compiler, VPUSH | VD4(SLJIT_FS0) | ((sljit_uw)fsaveds << 1)));
if (fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG)
- FAIL_IF(push_inst32(compiler, VPUSH | DD4(fscratches) | ((sljit_uw)(fscratches - (SLJIT_FIRST_SAVED_FLOAT_REG - 1)) << 1)));
+ FAIL_IF(push_inst32(compiler, VPUSH | VD4(fscratches) | ((sljit_uw)(fscratches - (SLJIT_FIRST_SAVED_FLOAT_REG - 1)) << 1)));
}
}
@@ -1258,17 +1352,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
switch (arg_types & SLJIT_ARG_MASK) {
case SLJIT_ARG_TYPE_F64:
if (offset != old_offset)
- *remap_ptr++ = VMOV_F32 | SLJIT_32 | DD4(offset) | DM4(old_offset);
+ *remap_ptr++ = VMOV_F32 | SLJIT_32 | VD4(offset) | VM4(old_offset);
old_offset++;
offset++;
break;
case SLJIT_ARG_TYPE_F32:
if (f32_offset != 0) {
- *remap_ptr++ = VMOV_F32 | 0x20 | DD4(offset) | DM4(f32_offset);
+ *remap_ptr++ = VMOV_F32 | 0x20 | VD4(offset) | VM4(f32_offset);
f32_offset = 0;
} else {
if (offset != old_offset)
- *remap_ptr++ = VMOV_F32 | DD4(offset) | DM4(old_offset);
+ *remap_ptr++ = VMOV_F32 | VD4(offset) | VM4(old_offset);
f32_offset = old_offset;
old_offset++;
}
@@ -1356,6 +1450,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
size = GET_SAVED_REGISTERS_SIZE(scratches, saveds - SLJIT_KEPT_SAVEDS_COUNT(options), 1);
+ /* Doubles are saved, so alignment is unaffected. */
if ((size & SSIZE_OF(sw)) != 0 && (fsaveds > 0 || fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG))
size += SSIZE_OF(sw);
@@ -1401,12 +1496,12 @@ static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler, sljit
FAIL_IF(emit_add_sp(compiler, (sljit_uw)local_size));
if (fsaveds + fscratches >= SLJIT_NUMBER_OF_FLOAT_REGISTERS) {
- FAIL_IF(push_inst32(compiler, VPOP | DD4(SLJIT_FS0) | ((sljit_uw)SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS << 1)));
+ FAIL_IF(push_inst32(compiler, VPOP | VD4(SLJIT_FS0) | ((sljit_uw)SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS << 1)));
} else {
if (fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG)
- FAIL_IF(push_inst32(compiler, VPOP | DD4(fscratches) | ((sljit_uw)(fscratches - (SLJIT_FIRST_SAVED_FLOAT_REG - 1)) << 1)));
+ FAIL_IF(push_inst32(compiler, VPOP | VD4(fscratches) | ((sljit_uw)(fscratches - (SLJIT_FIRST_SAVED_FLOAT_REG - 1)) << 1)));
if (fsaveds > 0)
- FAIL_IF(push_inst32(compiler, VPOP | DD4(SLJIT_FS0) | ((sljit_uw)fsaveds << 1)));
+ FAIL_IF(push_inst32(compiler, VPOP | VD4(SLJIT_FS0) | ((sljit_uw)fsaveds << 1)));
}
local_size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds, 1) & 0x7;
@@ -1705,22 +1800,22 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
break;
case SLJIT_MOV_U8:
flags = BYTE_SIZE;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_u8)srcw;
break;
case SLJIT_MOV_S8:
flags = BYTE_SIZE | SIGNED;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_s8)srcw;
break;
case SLJIT_MOV_U16:
flags = HALF_SIZE;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_u16)srcw;
break;
case SLJIT_MOV_S16:
flags = HALF_SIZE | SIGNED;
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
srcw = (sljit_s16)srcw;
break;
default:
@@ -1729,7 +1824,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
break;
}
- if (src & SLJIT_IMM)
+ if (src == SLJIT_IMM)
FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG2, (sljit_uw)srcw));
else if (src & SLJIT_MEM) {
FAIL_IF(emit_op_mem(compiler, flags, dst_r, src, srcw, TMP_REG1));
@@ -1745,10 +1840,14 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
return emit_op_mem(compiler, flags | STORE, dst_r, dst, dstw, TMP_REG2);
}
+ SLJIT_COMPILE_ASSERT(WORD_SIZE == 0, word_size_must_be_0);
flags = HAS_FLAGS(op_flags) ? SET_FLAGS : 0;
+ if (op == SLJIT_REV_U16 || op == SLJIT_REV_S16)
+ flags |= HALF_SIZE;
+
if (src & SLJIT_MEM) {
- FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src, srcw, TMP_REG1));
+ FAIL_IF(emit_op_mem(compiler, flags, TMP_REG1, src, srcw, TMP_REG1));
src = TMP_REG1;
}
@@ -1778,7 +1877,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
if (dst == TMP_REG1)
flags |= UNUSED_RETURN;
- if (src1 & SLJIT_IMM)
+ if (src1 == SLJIT_IMM)
flags |= ARG1_IMM;
else if (src1 & SLJIT_MEM) {
emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src1, src1w, TMP_REG1);
@@ -1787,7 +1886,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
else
src1w = src1;
- if (src2 & SLJIT_IMM)
+ if (src2 == SLJIT_IMM)
flags |= ARG2_IMM;
else if (src2 & SLJIT_MEM) {
src2_reg = (!(flags & ARG1_IMM) && (src1w == TMP_REG1)) ? TMP_REG2 : TMP_REG1;
@@ -1816,68 +1915,60 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2u(struct sljit_compiler *compil
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_shift_into(struct sljit_compiler *compiler, sljit_s32 op,
- sljit_s32 src_dst,
- sljit_s32 src1, sljit_sw src1w,
- sljit_s32 src2, sljit_sw src2w)
+ sljit_s32 dst_reg,
+ sljit_s32 src1_reg,
+ sljit_s32 src2_reg,
+ sljit_s32 src3, sljit_sw src3w)
{
sljit_s32 is_left;
CHECK_ERROR();
- CHECK(check_sljit_emit_shift_into(compiler, op, src_dst, src1, src1w, src2, src2w));
+ CHECK(check_sljit_emit_shift_into(compiler, op, dst_reg, src1_reg, src2_reg, src3, src3w));
op = GET_OPCODE(op);
is_left = (op == SLJIT_SHL || op == SLJIT_MSHL);
- if (src_dst == src1) {
+ if (src1_reg == src2_reg) {
SLJIT_SKIP_CHECKS(compiler);
- return sljit_emit_op2(compiler, is_left ? SLJIT_ROTL : SLJIT_ROTR, src_dst, 0, src_dst, 0, src2, src2w);
+ return sljit_emit_op2(compiler, is_left ? SLJIT_ROTL : SLJIT_ROTR, dst_reg, 0, src1_reg, 0, src3, src3w);
}
- ADJUST_LOCAL_OFFSET(src1, src1w);
- ADJUST_LOCAL_OFFSET(src2, src2w);
+ ADJUST_LOCAL_OFFSET(src3, src3w);
- if (src2 & SLJIT_IMM) {
- src2w &= 0x1f;
+ if (src3 == SLJIT_IMM) {
+ src3w &= 0x1f;
- if (src2w == 0)
+ if (src3w == 0)
return SLJIT_SUCCESS;
- } else if (src2 & SLJIT_MEM) {
- FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, src2, src2w, TMP_REG2));
- src2 = TMP_REG2;
- }
- if (src1 & SLJIT_MEM) {
- FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, src1, src1w, TMP_REG1));
- src1 = TMP_REG1;
- } else if (src1 & SLJIT_IMM) {
- FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)src1w));
- src1 = TMP_REG1;
- }
-
- if (src2 & SLJIT_IMM) {
- if (reg_map[src_dst] <= 7)
- FAIL_IF(push_inst16(compiler, (is_left ? LSLSI : LSRSI) | RD3(src_dst) | RN3(src_dst) | ((sljit_ins)src2w << 6)));
+ if (IS_2_LO_REGS(dst_reg, src1_reg))
+ FAIL_IF(push_inst16(compiler, (is_left ? LSLSI : LSRSI) | RD3(dst_reg) | RN3(src1_reg) | ((sljit_ins)src3w << 6)));
else
- FAIL_IF(push_inst32(compiler, (is_left ? LSL_WI : LSR_WI) | RD4(src_dst) | RM4(src_dst) | IMM5(src2w)));
+ FAIL_IF(push_inst32(compiler, (is_left ? LSL_WI : LSR_WI) | RD4(dst_reg) | RM4(src1_reg) | IMM5(src3w)));
- src2w = (src2w ^ 0x1f) + 1;
- return push_inst32(compiler, ORR_W | RD4(src_dst) | RN4(src_dst) | RM4(src1) | (is_left ? 0x10 : 0x0) | IMM5(src2w));
+ src3w = (src3w ^ 0x1f) + 1;
+ return push_inst32(compiler, ORR_W | RD4(dst_reg) | RN4(dst_reg) | RM4(src2_reg) | (is_left ? 0x10 : 0x0) | IMM5(src3w));
}
- if (op == SLJIT_MSHL || op == SLJIT_MLSHR) {
- FAIL_IF(push_inst32(compiler, ANDI | RD4(TMP_REG2) | RN4(src2) | 0x1f));
- src2 = TMP_REG2;
+ if (src3 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, src3, src3w, TMP_REG2));
+ src3 = TMP_REG2;
}
- if (IS_2_LO_REGS(src_dst, src2))
- FAIL_IF(push_inst16(compiler, (is_left ? LSLS : LSRS) | RD3(src_dst) | RN3(src2)));
+ if (op == SLJIT_MSHL || op == SLJIT_MLSHR || dst_reg == src3) {
+ FAIL_IF(push_inst32(compiler, ANDI | RD4(TMP_REG2) | RN4(src3) | 0x1f));
+ src3 = TMP_REG2;
+ }
+
+ if (dst_reg == src1_reg && IS_2_LO_REGS(dst_reg, src3))
+ FAIL_IF(push_inst16(compiler, (is_left ? LSLS : LSRS) | RD3(dst_reg) | RN3(src3)));
else
- FAIL_IF(push_inst32(compiler, (is_left ? LSL_W : LSR_W) | RD4(src_dst) | RN4(src_dst) | RM4(src2)));
+ FAIL_IF(push_inst32(compiler, (is_left ? LSL_W : LSR_W) | RD4(dst_reg) | RN4(src1_reg) | RM4(src3)));
- FAIL_IF(push_inst32(compiler, (is_left ? LSR_WI : LSL_WI) | RD4(TMP_REG1) | RM4(src1) | (1 << 6)));
- FAIL_IF(push_inst32(compiler, EORI | RD4(TMP_REG2) | RN4(src2) | 0x1f));
+ FAIL_IF(push_inst32(compiler, (is_left ? LSR_WI : LSL_WI) | RD4(TMP_REG1) | RM4(src2_reg) | (1 << 6)));
+ FAIL_IF(push_inst32(compiler, EORI | RD4(TMP_REG2) | RN4(src3) | 0x1f));
FAIL_IF(push_inst32(compiler, (is_left ? LSR_W : LSL_W) | RD4(TMP_REG1) | RN4(TMP_REG1) | RM4(TMP_REG2)));
- return push_inst32(compiler, ORR_W | RD4(src_dst) | RN4(src_dst) | RM4(TMP_REG1));
+ return push_inst32(compiler, ORR_W | RD4(dst_reg) | RN4(dst_reg) | RM4(TMP_REG1));
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
@@ -1909,16 +2000,60 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *comp
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_dst(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw)
{
- CHECK_REG_INDEX(check_sljit_get_register_index(reg));
- return reg_map[reg];
+ sljit_s32 size, dst_r;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_op_dst(compiler, op, dst, dstw));
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ switch (op) {
+ case SLJIT_FAST_ENTER:
+ SLJIT_ASSERT(reg_map[TMP_REG2] == 14);
+
+ if (FAST_IS_REG(dst))
+ return push_inst16(compiler, MOV | SET_REGS44(dst, TMP_REG2));
+ break;
+ case SLJIT_GET_RETURN_ADDRESS:
+ size = GET_SAVED_REGISTERS_SIZE(compiler->scratches, compiler->saveds - SLJIT_KEPT_SAVEDS_COUNT(compiler->options), 0);
+
+ if (compiler->fsaveds > 0 || compiler->fscratches >= SLJIT_FIRST_SAVED_FLOAT_REG) {
+ /* The size of pc is not added above. */
+ if ((size & SSIZE_OF(sw)) == 0)
+ size += SSIZE_OF(sw);
+
+ size += GET_SAVED_FLOAT_REGISTERS_SIZE(compiler->fscratches, compiler->fsaveds, f64);
+ }
+
+ SLJIT_ASSERT(((compiler->local_size + size + SSIZE_OF(sw)) & 0x7) == 0);
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, dst_r, SLJIT_MEM1(SLJIT_SP), compiler->local_size + size, TMP_REG1));
+ break;
+ }
+
+ if (dst & SLJIT_MEM)
+ return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, dst, dstw, TMP_REG1);
+
+ return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 type, sljit_s32 reg)
{
- CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
- return (freg_map[reg] << 1);
+ CHECK_REG_INDEX(check_sljit_get_register_index(type, reg));
+
+ if (type == SLJIT_GP_REGISTER)
+ return reg_map[reg];
+
+ if (type == SLJIT_FLOAT_REGISTER || type == SLJIT_SIMD_REG_64)
+ return freg_map[reg];
+
+ if (type != SLJIT_SIMD_REG_128)
+ return freg_map[reg] & ~0x1;
+
+ return -1;
}
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
@@ -1954,35 +2089,35 @@ static sljit_s32 emit_fop_mem(struct sljit_compiler *compiler, sljit_s32 flags,
if ((arg & REG_MASK) && (argw & 0x3) == 0) {
if (!(argw & ~0x3fc))
- return push_inst32(compiler, inst | 0x800000 | RN4(arg & REG_MASK) | DD4(reg) | ((sljit_uw)argw >> 2));
+ return push_inst32(compiler, inst | 0x800000 | RN4(arg & REG_MASK) | VD4(reg) | ((sljit_uw)argw >> 2));
if (!(-argw & ~0x3fc))
- return push_inst32(compiler, inst | RN4(arg & REG_MASK) | DD4(reg) | ((sljit_uw)-argw >> 2));
+ return push_inst32(compiler, inst | RN4(arg & REG_MASK) | VD4(reg) | ((sljit_uw)-argw >> 2));
}
if (arg & REG_MASK) {
if (emit_set_delta(compiler, TMP_REG1, arg & REG_MASK, argw) != SLJIT_ERR_UNSUPPORTED) {
FAIL_IF(compiler->error);
- return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | DD4(reg));
+ return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | VD4(reg));
}
imm = get_imm((sljit_uw)argw & ~(sljit_uw)0x3fc);
if (imm != INVALID_IMM) {
FAIL_IF(push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(arg & REG_MASK) | imm));
- return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | DD4(reg) | (((sljit_uw)argw & 0x3fc) >> 2));
+ return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | VD4(reg) | (((sljit_uw)argw & 0x3fc) >> 2));
}
imm = get_imm((sljit_uw)-argw & ~(sljit_uw)0x3fc);
if (imm != INVALID_IMM) {
argw = -argw;
FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(arg & REG_MASK) | imm));
- return push_inst32(compiler, inst | RN4(TMP_REG1) | DD4(reg) | (((sljit_uw)argw & 0x3fc) >> 2));
+ return push_inst32(compiler, inst | RN4(TMP_REG1) | VD4(reg) | (((sljit_uw)argw & 0x3fc) >> 2));
}
}
FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)argw));
if (arg & REG_MASK)
FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, (arg & REG_MASK))));
- return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | DD4(reg));
+ return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | VD4(reg));
}
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
@@ -1996,41 +2131,53 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_comp
src = TMP_FREG1;
}
- FAIL_IF(push_inst32(compiler, VCVT_S32_F32 | (op & SLJIT_32) | DD4(TMP_FREG1) | DM4(src)));
+ FAIL_IF(push_inst32(compiler, VCVT_S32_F32 | (op & SLJIT_32) | VD4(TMP_FREG1) | VM4(src)));
if (FAST_IS_REG(dst))
- return push_inst32(compiler, VMOV | (1 << 20) | RT4(dst) | DN4(TMP_FREG1));
+ return push_inst32(compiler, VMOV | (1 << 20) | RT4(dst) | VN4(TMP_FREG1));
/* Store the integer value from a VFP register. */
return emit_fop_mem(compiler, 0, TMP_FREG1, dst, dstw);
}
-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+static sljit_s32 sljit_emit_fop1_conv_f64_from_w(struct sljit_compiler *compiler, sljit_ins ins,
sljit_s32 dst, sljit_sw dstw,
sljit_s32 src, sljit_sw srcw)
{
sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
- op ^= SLJIT_32;
-
if (FAST_IS_REG(src))
- FAIL_IF(push_inst32(compiler, VMOV | RT4(src) | DN4(TMP_FREG1)));
+ FAIL_IF(push_inst32(compiler, VMOV | RT4(src) | VN4(TMP_FREG1)));
else if (src & SLJIT_MEM) {
/* Load the integer value into a VFP register. */
FAIL_IF(emit_fop_mem(compiler, FPU_LOAD, TMP_FREG1, src, srcw));
}
else {
FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)srcw));
- FAIL_IF(push_inst32(compiler, VMOV | RT4(TMP_REG1) | DN4(TMP_FREG1)));
+ FAIL_IF(push_inst32(compiler, VMOV | RT4(TMP_REG1) | VN4(TMP_FREG1)));
}
- FAIL_IF(push_inst32(compiler, VCVT_F32_S32 | (op & SLJIT_32) | DD4(dst_r) | DM4(TMP_FREG1)));
+ FAIL_IF(push_inst32(compiler, ins | VD4(dst_r) | VM4(TMP_FREG1)));
if (dst & SLJIT_MEM)
- return emit_fop_mem(compiler, (op & SLJIT_32), TMP_FREG1, dst, dstw);
+ return emit_fop_mem(compiler, (ins & SLJIT_32), TMP_FREG1, dst, dstw);
return SLJIT_SUCCESS;
}
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ return sljit_emit_fop1_conv_f64_from_w(compiler, VCVT_F32_S32 | (~op & SLJIT_32), dst, dstw, src, srcw);
+}
+
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_uw(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst, sljit_sw dstw,
+ sljit_s32 src, sljit_sw srcw)
+{
+ return sljit_emit_fop1_conv_f64_from_w(compiler, VCVT_F32_U32 | (~op & SLJIT_32), dst, dstw, src, srcw);
+}
+
static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
sljit_s32 src1, sljit_sw src1w,
sljit_s32 src2, sljit_sw src2w)
@@ -2038,17 +2185,23 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compile
op ^= SLJIT_32;
if (src1 & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w);
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w));
src1 = TMP_FREG1;
}
if (src2 & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG2, src2, src2w);
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG2, src2, src2w));
src2 = TMP_FREG2;
}
- FAIL_IF(push_inst32(compiler, VCMP_F32 | (op & SLJIT_32) | DD4(src1) | DM4(src2)));
- return push_inst32(compiler, VMRS);
+ FAIL_IF(push_inst32(compiler, VCMP_F32 | (op & SLJIT_32) | VD4(src1) | VM4(src2)));
+ FAIL_IF(push_inst32(compiler, VMRS));
+
+ if (GET_FLAG_TYPE(op) != SLJIT_UNORDERED_OR_EQUAL)
+ return SLJIT_SUCCESS;
+
+ FAIL_IF(push_inst16(compiler, IT | (0x6 << 4) | 0x8));
+ return push_inst16(compiler, CMP /* Rm, Rn = r0 */);
}
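Note: VMRS copies the FPSCR flags into APSR, where an unordered float compare sets V. Condition 0x6 in the IT block is VS, so the conditional CMP r0, r0 executes only for unordered operands and forces Z to 1; a single EQ test afterwards therefore implements SLJIT_UNORDERED_OR_EQUAL. The emitted tail, as a comment sketch:

	/* vcmp.f32  sA, sB
	 * vmrs      APSR_nzcv, fpscr
	 * it        vs
	 * cmpvs     r0, r0   ; Z := 1 iff the compare was unordered */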
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
@@ -2068,7 +2221,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
op ^= SLJIT_32;
if (src & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, dst_r, src, srcw);
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, dst_r, src, srcw));
src = dst_r;
}
@@ -2076,19 +2229,19 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compil
case SLJIT_MOV_F64:
if (src != dst_r) {
if (dst_r != TMP_FREG1)
- FAIL_IF(push_inst32(compiler, VMOV_F32 | (op & SLJIT_32) | DD4(dst_r) | DM4(src)));
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | (op & SLJIT_32) | VD4(dst_r) | VM4(src)));
else
dst_r = src;
}
break;
case SLJIT_NEG_F64:
- FAIL_IF(push_inst32(compiler, VNEG_F32 | (op & SLJIT_32) | DD4(dst_r) | DM4(src)));
+ FAIL_IF(push_inst32(compiler, VNEG_F32 | (op & SLJIT_32) | VD4(dst_r) | VM4(src)));
break;
case SLJIT_ABS_F64:
- FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_32) | DD4(dst_r) | DM4(src)));
+ FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_32) | VD4(dst_r) | VM4(src)));
break;
case SLJIT_CONV_F64_FROM_F32:
- FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | (op & SLJIT_32) | DD4(dst_r) | DM4(src)));
+ FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | (op & SLJIT_32) | VD4(dst_r) | VM4(src)));
op ^= SLJIT_32;
break;
}
@@ -2115,27 +2268,33 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
if (src1 & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w);
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w));
src1 = TMP_FREG1;
}
if (src2 & SLJIT_MEM) {
- emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG2, src2, src2w);
+ FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_32) | FPU_LOAD, TMP_FREG2, src2, src2w));
src2 = TMP_FREG2;
}
switch (GET_OPCODE(op)) {
case SLJIT_ADD_F64:
- FAIL_IF(push_inst32(compiler, VADD_F32 | (op & SLJIT_32) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+ FAIL_IF(push_inst32(compiler, VADD_F32 | (op & SLJIT_32) | VD4(dst_r) | VN4(src1) | VM4(src2)));
break;
case SLJIT_SUB_F64:
- FAIL_IF(push_inst32(compiler, VSUB_F32 | (op & SLJIT_32) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+ FAIL_IF(push_inst32(compiler, VSUB_F32 | (op & SLJIT_32) | VD4(dst_r) | VN4(src1) | VM4(src2)));
break;
case SLJIT_MUL_F64:
- FAIL_IF(push_inst32(compiler, VMUL_F32 | (op & SLJIT_32) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+ FAIL_IF(push_inst32(compiler, VMUL_F32 | (op & SLJIT_32) | VD4(dst_r) | VN4(src1) | VM4(src2)));
break;
case SLJIT_DIV_F64:
- FAIL_IF(push_inst32(compiler, VDIV_F32 | (op & SLJIT_32) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+ FAIL_IF(push_inst32(compiler, VDIV_F32 | (op & SLJIT_32) | VD4(dst_r) | VN4(src1) | VM4(src2)));
break;
+ case SLJIT_COPYSIGN_F64:
+ FAIL_IF(push_inst32(compiler, VMOV | (1 << 20) | VN4(src2) | RT4(TMP_REG1) | ((op & SLJIT_32) ? (1 << 7) : 0)));
+ FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_32) | VD4(dst_r) | VM4(src1)));
+ FAIL_IF(push_inst32(compiler, CMPI_W | RN4(TMP_REG1) | 0));
+ FAIL_IF(push_inst16(compiler, IT | (0xb << 4) | 0x8));
+ return push_inst32(compiler, VNEG_F32 | (op & SLJIT_32) | VD4(dst_r) | VM4(dst_r));
}
if (!(dst & SLJIT_MEM))
@@ -2143,23 +2302,99 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compil
return emit_fop_mem(compiler, (op & SLJIT_32), TMP_FREG1, dst, dstw);
}
-/* --------------------------------------------------------------------- */
-/* Other instructions */
-/* --------------------------------------------------------------------- */
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset32(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f32 value)
+{
+#if defined(__ARM_NEON) && __ARM_NEON
+ sljit_u32 exp;
+ sljit_ins ins;
+#endif /* NEON */
+ union {
+ sljit_u32 imm;
+ sljit_f32 value;
+ } u;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fset32(compiler, freg, value));
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
+ u.value = value;
+
+#if defined(__ARM_NEON) && __ARM_NEON
+ if ((u.imm << (32 - 19)) == 0) {
+ exp = (u.imm >> (23 + 2)) & 0x3f;
+
+ if (exp == 0x20 || exp == 0x1f) {
+ ins = ((u.imm >> 24) & 0x80) | ((u.imm >> 19) & 0x7f);
+ return push_inst32(compiler, (VMOV_F32 ^ (1 << 6)) | ((ins & 0xf0) << 12) | VD4(freg) | (ins & 0xf));
+ }
+ }
+#endif /* NEON */
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, u.imm));
+ return push_inst32(compiler, VMOV | VN4(freg) | RT4(TMP_REG1));
+}
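Note: the NEON fast path of sljit_emit_fset32() emits VMOV.F32 with an 8-bit immediate when the value fits the VFP immediate form: the low 19 mantissa bits are zero and the exponent sits in a narrow window. A standalone model of that test (a sketch mirroring the masks above, not code from the source):

	#include <stdint.h>
	#include <string.h>

	/* Returns 1 when `value` is encodable as a VFP/NEON imm8 float. */
	static int vfp_imm8_encodable(float value)
	{
		uint32_t imm, exp;
		memcpy(&imm, &value, sizeof(imm));
		if ((imm << (32 - 19)) != 0)  /* low mantissa bits must be 0 */
			return 0;
		exp = (imm >> (23 + 2)) & 0x3f;
		return exp == 0x20 || exp == 0x1f;
	}
	/* vfp_imm8_encodable(1.0f) == 1, vfp_imm8_encodable(0.1f) == 0 */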
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fset64(struct sljit_compiler *compiler,
+ sljit_s32 freg, sljit_f64 value)
{
+#if defined(__ARM_NEON) && __ARM_NEON
+ sljit_u32 exp;
+ sljit_ins ins;
+#endif /* NEON */
+ union {
+ sljit_u32 imm[2];
+ sljit_f64 value;
+ } u;
+
CHECK_ERROR();
- CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
- ADJUST_LOCAL_OFFSET(dst, dstw);
+ CHECK(check_sljit_emit_fset64(compiler, freg, value));
- SLJIT_ASSERT(reg_map[TMP_REG2] == 14);
+ u.value = value;
- if (FAST_IS_REG(dst))
- return push_inst16(compiler, MOV | SET_REGS44(dst, TMP_REG2));
+#if defined(__ARM_NEON) && __ARM_NEON
+ if (u.imm[0] == 0 && (u.imm[1] << (64 - 48)) == 0) {
+ exp = (u.imm[1] >> ((52 - 32) + 2)) & 0x1ff;
- /* Memory. */
- return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, dst, dstw, TMP_REG1);
+ if (exp == 0x100 || exp == 0xff) {
+ ins = ((u.imm[1] >> (56 - 32)) & 0x80) | ((u.imm[1] >> (48 - 32)) & 0x7f);
+ return push_inst32(compiler, (VMOV_F32 ^ (1 << 6)) | (1 << 8) | ((ins & 0xf0) << 12) | VD4(freg) | (ins & 0xf));
+ }
+ }
+#endif /* NEON */
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, u.imm[0]));
+ if (u.imm[0] == u.imm[1])
+ return push_inst32(compiler, VMOV2 | RN4(TMP_REG1) | RT4(TMP_REG1) | VM4(freg));
+
+ FAIL_IF(load_immediate(compiler, TMP_REG2, u.imm[1]));
+ return push_inst32(compiler, VMOV2 | RN4(TMP_REG2) | RT4(TMP_REG1) | VM4(freg));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fcopy(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 freg, sljit_s32 reg)
+{
+ sljit_s32 reg2;
+ sljit_ins inst;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fcopy(compiler, op, freg, reg));
+
+ if (reg & REG_PAIR_MASK) {
+ reg2 = REG_PAIR_SECOND(reg);
+ reg = REG_PAIR_FIRST(reg);
+
+ inst = VMOV2 | RN4(reg) | RT4(reg2) | VM4(freg);
+ } else {
+ inst = VMOV | VN4(freg) | RT4(reg);
+
+ if (!(op & SLJIT_32))
+ inst |= 1 << 7;
+ }
+
+ if (GET_OPCODE(op) == SLJIT_COPY_FROM_F64)
+ inst |= 1 << 20;
+
+ return push_inst32(compiler, inst);
}
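Note: sljit_emit_fcopy() transfers raw bit patterns between core and float registers without conversion: VMOV for one core register and one f32 half (bit 7 selecting the upper half of an f64), VMOV2 for a core register pair and a full f64, with bit 20 reversing the direction for SLJIT_COPY_FROM_F64. A usage sketch, assuming an ordinary sljit call sequence:

	/* Read the raw bits of FR0 into the R0/R1 pair. */
	sljit_emit_fcopy(compiler, SLJIT_COPY_FROM_F64, SLJIT_FR0,
		SLJIT_REG_PAIR(SLJIT_R0, SLJIT_R1));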
/* --------------------------------------------------------------------- */
@@ -2170,15 +2405,17 @@ static sljit_uw get_cc(struct sljit_compiler *compiler, sljit_s32 type)
{
switch (type) {
case SLJIT_EQUAL:
+ case SLJIT_ATOMIC_STORED:
case SLJIT_F_EQUAL:
case SLJIT_ORDERED_EQUAL:
- case SLJIT_UNORDERED_OR_EQUAL: /* Not supported. */
+ case SLJIT_UNORDERED_OR_EQUAL:
return 0x0;
case SLJIT_NOT_EQUAL:
+ case SLJIT_ATOMIC_NOT_STORED:
case SLJIT_F_NOT_EQUAL:
case SLJIT_UNORDERED_OR_NOT_EQUAL:
- case SLJIT_ORDERED_NOT_EQUAL: /* Not supported. */
+ case SLJIT_ORDERED_NOT_EQUAL:
return 0x1;
case SLJIT_CARRY:
@@ -2453,18 +2690,18 @@ static sljit_s32 hardfloat_call_with_args(struct sljit_compiler *compiler, sljit
switch (arg_types & SLJIT_ARG_MASK) {
case SLJIT_ARG_TYPE_F64:
if (offset != new_offset)
- FAIL_IF(push_inst32(compiler, VMOV_F32 | SLJIT_32 | DD4(new_offset) | DM4(offset)));
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | SLJIT_32 | VD4(new_offset) | VM4(offset)));
new_offset++;
offset++;
break;
case SLJIT_ARG_TYPE_F32:
if (f32_offset != 0) {
- FAIL_IF(push_inst32(compiler, VMOV_F32 | 0x400000 | DD4(f32_offset) | DM4(offset)));
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | 0x400000 | VD4(f32_offset) | VM4(offset)));
f32_offset = 0;
} else {
if (offset != new_offset)
- FAIL_IF(push_inst32(compiler, VMOV_F32 | 0x400000 | DD4(new_offset) | DM4(offset)));
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | 0x400000 | VD4(new_offset) | VM4(offset)));
f32_offset = new_offset;
new_offset++;
}
@@ -2546,7 +2783,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compi
SLJIT_ASSERT(reg_map[TMP_REG1] != 14);
- if (!(src & SLJIT_IMM)) {
+ if (src != SLJIT_IMM) {
if (FAST_IS_REG(src)) {
SLJIT_ASSERT(reg_map[src] != 14);
return push_inst16(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RN3(src));
@@ -2645,8 +2882,8 @@ static SLJIT_INLINE sljit_s32 emit_fmov_before_return(struct sljit_compiler *com
if (FAST_IS_REG(src)) {
if (op & SLJIT_32)
- return push_inst32(compiler, VMOV | (1 << 20) | DN4(src) | RT4(SLJIT_R0));
- return push_inst32(compiler, VMOV2 | (1 << 20) | DM4(src) | RT4(SLJIT_R0) | RN4(SLJIT_R1));
+ return push_inst32(compiler, VMOV | (1 << 20) | VN4(src) | RT4(SLJIT_R0));
+ return push_inst32(compiler, VMOV2 | (1 << 20) | VM4(src) | RT4(SLJIT_R0) | RN4(SLJIT_R1));
}
SLJIT_SKIP_CHECKS(compiler);
@@ -2711,23 +2948,47 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co
return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(TMP_REG1) | RM4(dst_r));
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_select(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 dst_reg,
- sljit_s32 src, sljit_sw srcw)
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_reg)
{
sljit_uw cc, tmp;
CHECK_ERROR();
- CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
+ CHECK(check_sljit_emit_select(compiler, type, dst_reg, src1, src1w, src2_reg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ if (src2_reg != dst_reg && src1 == dst_reg) {
+ src1 = src2_reg;
+ src1w = 0;
+ src2_reg = dst_reg;
+ type ^= 0x1;
+ }
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_op_mem(compiler, WORD_SIZE, (src2_reg != dst_reg) ? dst_reg : TMP_REG1, src1, src1w, TMP_REG2));
+
+ if (src2_reg != dst_reg) {
+ src1 = src2_reg;
+ src1w = 0;
+ type ^= 0x1;
+ } else {
+ src1 = TMP_REG1;
+ src1w = 0;
+ }
+ } else if (dst_reg != src2_reg)
+ FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(dst_reg, src2_reg)));
cc = get_cc(compiler, type & ~SLJIT_32);
- if (!(src & SLJIT_IMM)) {
+ if (src1 != SLJIT_IMM) {
FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8));
- return push_inst16(compiler, MOV | SET_REGS44(dst_reg, src));
+ return push_inst16(compiler, MOV | SET_REGS44(dst_reg, src1));
}
- tmp = (sljit_uw) srcw;
+ tmp = (sljit_uw)src1w;
if (tmp < 0x10000) {
/* set low 16 bits, set hi 16 bits to 0. */
@@ -2736,13 +2997,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
| COPY_BITS(tmp, 12, 16, 4) | COPY_BITS(tmp, 11, 26, 1) | COPY_BITS(tmp, 8, 12, 3) | (tmp & 0xff));
}
- tmp = get_imm((sljit_uw)srcw);
+ tmp = get_imm((sljit_uw)src1w);
if (tmp != INVALID_IMM) {
FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8));
return push_inst32(compiler, MOV_WI | RD4(dst_reg) | tmp);
}
- tmp = get_imm(~(sljit_uw)srcw);
+ tmp = get_imm(~(sljit_uw)src1w);
if (tmp != INVALID_IMM) {
FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8));
return push_inst32(compiler, MVN_WI | RD4(dst_reg) | tmp);
@@ -2750,13 +3011,43 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compil
FAIL_IF(push_inst16(compiler, IT | (cc << 4) | ((cc & 0x1) << 3) | 0x4));
- tmp = (sljit_uw) srcw;
+ tmp = (sljit_uw)src1w;
FAIL_IF(push_inst32(compiler, MOVW | RD4(dst_reg)
| COPY_BITS(tmp, 12, 16, 4) | COPY_BITS(tmp, 11, 26, 1) | COPY_BITS(tmp, 8, 12, 3) | (tmp & 0xff)));
return push_inst32(compiler, MOVT | RD4(dst_reg)
| COPY_BITS(tmp, 12 + 16, 16, 4) | COPY_BITS(tmp, 11 + 16, 26, 1) | COPY_BITS(tmp, 8 + 16, 12, 3) | ((tmp & 0xff0000) >> 16));
}
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fselect(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg,
+ sljit_s32 src1, sljit_sw src1w,
+ sljit_s32 src2_freg)
+{
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_fselect(compiler, type, dst_freg, src1, src1w, src2_freg));
+
+ ADJUST_LOCAL_OFFSET(src1, src1w);
+
+ type ^= SLJIT_32;
+
+ if (dst_freg != src2_freg) {
+ if (dst_freg == src1) {
+ src1 = src2_freg;
+ src1w = 0;
+ type ^= 0x1;
+ } else
+ FAIL_IF(push_inst32(compiler, VMOV_F32 | (type & SLJIT_32) | VD4(dst_freg) | VM4(src2_freg)));
+ }
+
+ if (src1 & SLJIT_MEM) {
+ FAIL_IF(emit_fop_mem(compiler, (type & SLJIT_32) | FPU_LOAD, TMP_FREG1, src1, src1w));
+ src1 = TMP_FREG1;
+ }
+
+ FAIL_IF(push_inst16(compiler, IT | (get_cc(compiler, type & ~SLJIT_32) << 4) | 0x8));
+ return push_inst32(compiler, VMOV_F32 | (type & SLJIT_32) | VD4(dst_freg) | VM4(src1));
+}
+
SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compiler, sljit_s32 type,
sljit_s32 reg,
sljit_s32 mem, sljit_sw memw)
@@ -2770,7 +3061,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
if (!(reg & REG_PAIR_MASK))
return sljit_emit_mem_unaligned(compiler, type, reg, mem, memw);
- if (type & (SLJIT_MEM_UNALIGNED | SLJIT_MEM_UNALIGNED_16 | SLJIT_MEM_UNALIGNED_32)) {
+ if (type & (SLJIT_MEM_UNALIGNED | SLJIT_MEM_ALIGNED_16 | SLJIT_MEM_ALIGNED_32)) {
if ((mem & REG_MASK) == 0) {
if ((memw & 0xfff) >= (0x1000 - SSIZE_OF(sw))) {
imm = get_imm((sljit_uw)((memw + 0x1000) & ~0xfff));
@@ -2781,7 +3072,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_mem(struct sljit_compiler *compile
imm = get_imm((sljit_uw)(memw & ~0xfff));
if (imm != INVALID_IMM)
- memw &= 0xff;
+ memw &= 0xfff;
}
if (imm == INVALID_IMM) {
@@ -3058,11 +3349,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compil
CHECK_ERROR();
CHECK(check_sljit_emit_fmem(compiler, type, freg, mem, memw));
- if (type & SLJIT_MEM_UNALIGNED_32)
+ if (type & SLJIT_MEM_ALIGNED_32)
return emit_fop_mem(compiler, ((type ^ SLJIT_32) & SLJIT_32) | ((type & SLJIT_MEM_STORE) ? 0 : FPU_LOAD), freg, mem, memw);
if (type & SLJIT_MEM_STORE) {
- FAIL_IF(push_inst32(compiler, VMOV | (1 << 20) | DN4(freg) | RT4(TMP_REG2)));
+ FAIL_IF(push_inst32(compiler, VMOV | (1 << 20) | VN4(freg) | RT4(TMP_REG2)));
if (type & SLJIT_32)
return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, mem, memw, TMP_REG1);
@@ -3071,13 +3362,13 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compil
mem |= SLJIT_MEM;
FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, mem, memw, TMP_REG1));
- FAIL_IF(push_inst32(compiler, VMOV | (1 << 20) | DN4(freg) | 0x80 | RT4(TMP_REG2)));
+ FAIL_IF(push_inst32(compiler, VMOV | (1 << 20) | VN4(freg) | 0x80 | RT4(TMP_REG2)));
return emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, mem, memw + 4, TMP_REG1);
}
if (type & SLJIT_32) {
FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw, TMP_REG1));
- return push_inst32(compiler, VMOV | DN4(freg) | RT4(TMP_REG2));
+ return push_inst32(compiler, VMOV | VN4(freg) | RT4(TMP_REG2));
}
FAIL_IF(update_mem_addr(compiler, &mem, &memw, 0xfff - 4));
@@ -3085,11 +3376,715 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fmem(struct sljit_compiler *compil
FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG2, mem, memw, TMP_REG1));
FAIL_IF(emit_op_mem(compiler, WORD_SIZE, TMP_REG1, mem, memw + 4, TMP_REG1));
- return push_inst32(compiler, VMOV2 | DM4(freg) | RT4(TMP_REG2) | RN4(TMP_REG1));
+ return push_inst32(compiler, VMOV2 | VM4(freg) | RT4(TMP_REG2) | RN4(TMP_REG1));
+}
+
+static sljit_s32 sljit_emit_simd_mem_offset(struct sljit_compiler *compiler, sljit_s32 *mem_ptr, sljit_sw memw)
+{
+ sljit_uw imm;
+ sljit_s32 mem = *mem_ptr;
+
+ if (SLJIT_UNLIKELY(mem & OFFS_REG_MASK)) {
+ *mem_ptr = TMP_REG1;
+ return push_inst32(compiler, ADD_W | RD4(TMP_REG1) | RN4(mem & REG_MASK) | RM4(OFFS_REG(mem)) | ((sljit_uw)(memw & 0x3) << 6));
+ }
+
+ if (SLJIT_UNLIKELY(!(mem & REG_MASK))) {
+ *mem_ptr = TMP_REG1;
+ return load_immediate(compiler, TMP_REG1, (sljit_uw)memw);
+ }
+
+ mem &= REG_MASK;
+
+ if (memw == 0) {
+ *mem_ptr = mem;
+ return SLJIT_SUCCESS;
+ }
+
+ *mem_ptr = TMP_REG1;
+ imm = get_imm((sljit_uw)(memw < 0 ? -memw : memw));
+
+ if (imm != INVALID_IMM)
+ return push_inst32(compiler, ((memw < 0) ? SUB_WI : ADD_WI) | RD4(TMP_REG1) | RN4(mem) | imm);
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)memw));
+ return push_inst16(compiler, ADD | SET_REGS44(TMP_REG1, mem));
+}
+
+static SLJIT_INLINE sljit_s32 simd_get_quad_reg_index(sljit_s32 freg)
+{
+ freg += freg & 0x1;
+
+ SLJIT_ASSERT((freg_map[freg] & 0x1) == (freg <= SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS));
+
+ if (freg <= SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS)
+ freg--;
+
+ return freg;
+}
+
+#define SLJIT_QUAD_OTHER_HALF(freg) ((((freg) & 0x1) << 1) - 1)
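Note: SLJIT_QUAD_OTHER_HALF(freg) evaluates to -1 for an even register number and +1 for an odd one, so adding it toggles between the two consecutive virtual registers that back one quad register; applying it twice returns to the start. A minimal model (mirrors the macro above, for illustration):

	static int quad_other_half(int freg)
	{
		return ((freg & 0x1) << 1) - 1; /* even -> -1, odd -> +1 */
	}
	/* 6 + quad_other_half(6) == 5 and 5 + quad_other_half(5) == 6 */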
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 alignment = SLJIT_SIMD_GET_ELEM2_SIZE(type);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_mov(compiler, type, freg, srcdst, srcdstw));
+
+ ADJUST_LOCAL_OFFSET(srcdst, srcdstw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ freg = simd_get_quad_reg_index(freg);
+
+ if (!(srcdst & SLJIT_MEM)) {
+ if (reg_size == 4)
+ srcdst = simd_get_quad_reg_index(srcdst);
+
+ if (type & SLJIT_SIMD_STORE)
+ ins = VD4(srcdst) | VN4(freg) | VM4(freg);
+ else
+ ins = VD4(freg) | VN4(srcdst) | VM4(srcdst);
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 6;
+
+ return push_inst32(compiler, VORR | ins);
+ }
+
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &srcdst, srcdstw));
+
+ if (elem_size > 3)
+ elem_size = 3;
+
+ ins = ((type & SLJIT_SIMD_STORE) ? VST1 : VLD1) | VD4(freg)
+ | (sljit_ins)((reg_size == 3) ? (0x7 << 8) : (0xa << 8));
+
+ SLJIT_ASSERT(reg_size >= alignment);
+
+ if (alignment == 3)
+ ins |= 0x10;
+ else if (alignment >= 4)
+ ins |= 0x20;
+
+ return push_inst32(compiler, ins | RN4(srcdst) | ((sljit_ins)elem_size) << 6 | 0xf);
+}
+
+static sljit_ins simd_get_imm(sljit_s32 elem_size, sljit_uw value)
+{
+ sljit_ins result;
+
+ if (elem_size > 1 && (sljit_u16)value == (value >> 16)) {
+ elem_size = 1;
+ value = (sljit_u16)value;
+ }
+
+ if (elem_size == 1 && (sljit_u8)value == (value >> 8)) {
+ elem_size = 0;
+ value = (sljit_u8)value;
+ }
+
+ switch (elem_size) {
+ case 0:
+ SLJIT_ASSERT(value <= 0xff);
+ result = 0xe00;
+ break;
+ case 1:
+ SLJIT_ASSERT(value <= 0xffff);
+ result = 0;
+
+ while (1) {
+ if (value <= 0xff) {
+ result |= 0x800;
+ break;
+ }
+
+ if ((value & 0xff) == 0) {
+ value >>= 8;
+ result |= 0xa00;
+ break;
+ }
+
+ if (result != 0)
+ return ~(sljit_ins)0;
+
+ value ^= (sljit_uw)0xffff;
+ result = (1 << 5);
+ }
+ break;
+ default:
+ SLJIT_ASSERT(value <= 0xffffffff);
+ result = 0;
+
+ while (1) {
+ if (value <= 0xff) {
+ result |= 0x000;
+ break;
+ }
+
+ if ((value & ~(sljit_uw)0xff00) == 0) {
+ value >>= 8;
+ result |= 0x200;
+ break;
+ }
+
+ if ((value & ~(sljit_uw)0xff0000) == 0) {
+ value >>= 16;
+ result |= 0x400;
+ break;
+ }
+
+ if ((value & ~(sljit_uw)0xff000000) == 0) {
+ value >>= 24;
+ result |= 0x600;
+ break;
+ }
+
+ if ((value & (sljit_uw)0xff) == 0xff && (value >> 16) == 0) {
+ value >>= 8;
+ result |= 0xc00;
+ break;
+ }
+
+ if ((value & (sljit_uw)0xffff) == 0xffff && (value >> 24) == 0) {
+ value >>= 16;
+ result |= 0xd00;
+ break;
+ }
+
+ if (result != 0)
+ return ~(sljit_ins)0;
+
+ value = ~value;
+ result = (1 << 5);
+ }
+ break;
+ }
+
+ return ((sljit_ins)value & 0xf) | (((sljit_ins)value & 0x70) << 12) | (((sljit_ins)value & 0x80) << 21) | result;
+}
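Note: simd_get_imm() implements the NEON "modified immediate" forms: the value is first shrunk to the smallest repeating lane (32 -> 16 -> 8 bits), then matched against the byte-positioned and byte-inverted patterns, returning ~(sljit_ins)0 when nothing fits. A usage sketch:

	/* 0x00ff00ff repeats as the 16-bit pattern 0x00ff and encodes;
	   0x12345678 has no repeating byte pattern and is rejected. */
	sljit_ins imm = simd_get_imm(2, (sljit_uw)0x00ff00ff);
	if (imm != ~(sljit_ins)0)
		FAIL_IF(push_inst32(compiler, VMOV_i | imm | VD4(freg)));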
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins, imm;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_replicate(compiler, type, freg, src, srcw));
+
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) ? (elem_size < 2 || elem_size > 3) : (elem_size > 2))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ freg = simd_get_quad_reg_index(freg);
+
+ if (src == SLJIT_IMM && srcw == 0)
+ return push_inst32(compiler, VMOV_i | ((reg_size == 4) ? (1 << 6) : 0) | VD4(freg));
+
+ if (SLJIT_UNLIKELY(elem_size == 3)) {
+ SLJIT_ASSERT(type & SLJIT_SIMD_FLOAT);
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(emit_fop_mem(compiler, FPU_LOAD | SLJIT_32, freg, src, srcw));
+ src = freg;
+ } else if (freg != src)
+ FAIL_IF(push_inst32(compiler, VORR | VD4(freg) | VN4(src) | VM4(src)));
+
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+
+ if (freg != src)
+ return push_inst32(compiler, VORR | VD4(freg) | VN4(src) | VM4(src));
+ return SLJIT_SUCCESS;
+ }
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &src, srcw));
+
+ ins = (sljit_ins)(elem_size << 6);
+
+ if (reg_size == 4)
+ ins |= 1 << 5;
+
+ return push_inst32(compiler, VLD1_r | ins | VD4(freg) | RN4(src) | 0xf);
+ }
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ SLJIT_ASSERT(elem_size == 2);
+ ins = ((sljit_ins)freg_ebit_map[src] << (16 + 2 + 1)) | ((sljit_ins)1 << (16 + 2));
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 6;
+
+ return push_inst32(compiler, VDUP_s | ins | VD4(freg) | (sljit_ins)freg_map[src]);
+ }
+
+ if (src == SLJIT_IMM) {
+ if (elem_size < 2)
+ srcw &= ((sljit_sw)1 << (((sljit_sw)1 << elem_size) << 3)) - 1;
+
+ imm = simd_get_imm(elem_size, (sljit_uw)srcw);
+
+ if (imm != ~(sljit_ins)0) {
+ if (reg_size == 4)
+ imm |= (sljit_ins)1 << 6;
+
+ return push_inst32(compiler, VMOV_i | imm | VD4(freg));
+ }
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)srcw));
+ src = TMP_REG1;
+ }
+
+ switch (elem_size) {
+ case 0:
+ ins = 1 << 22;
+ break;
+ case 1:
+ ins = 1 << 5;
+ break;
+ default:
+ ins = 0;
+ break;
+ }
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 21;
+
+ return push_inst32(compiler, VDUP | ins | VN4(freg) | RT4(src));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_mov(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg, sljit_s32 lane_index,
+ sljit_s32 srcdst, sljit_sw srcdstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_mov(compiler, type, freg, lane_index, srcdst, srcdstw));
+
+ ADJUST_LOCAL_OFFSET(srcdst, srcdstw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) ? (elem_size < 2 || elem_size > 3) : (elem_size > 2))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ freg = simd_get_quad_reg_index(freg);
+
+ if (type & SLJIT_SIMD_LANE_ZERO) {
+ ins = (reg_size == 3) ? 0 : ((sljit_ins)1 << 6);
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (elem_size == 3 && !(srcdst & SLJIT_MEM)) {
+ if (lane_index == 1)
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+
+ if (srcdst != freg)
+ FAIL_IF(push_inst32(compiler, VORR | VD4(freg) | VN4(srcdst) | VM4(srcdst)));
+
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ return push_inst32(compiler, VMOV_i | VD4(freg));
+ }
+
+ if (srcdst == freg || (elem_size == 3 && srcdst == (freg + SLJIT_QUAD_OTHER_HALF(freg)))) {
+ FAIL_IF(push_inst32(compiler, VORR | ins | VD4(TMP_FREG2) | VN4(freg) | VM4(freg)));
+ srcdst = TMP_FREG2;
+ srcdstw = 0;
+ }
+ }
+
+ FAIL_IF(push_inst32(compiler, VMOV_i | ins | VD4(freg)));
+ }
+
+ if (reg_size == 4 && lane_index >= (0x8 >> elem_size)) {
+ lane_index -= (0x8 >> elem_size);
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ }
+
+ if (srcdst & SLJIT_MEM) {
+ if (elem_size == 3)
+ return emit_fop_mem(compiler, ((type & SLJIT_SIMD_STORE) ? 0 : FPU_LOAD) | SLJIT_32, freg, srcdst, srcdstw);
+
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &srcdst, srcdstw));
+
+ lane_index = lane_index << elem_size;
+ ins = (sljit_ins)((elem_size << 10) | (lane_index << 5));
+ return push_inst32(compiler, ((type & SLJIT_SIMD_STORE) ? VST1_s : VLD1_s) | ins | VD4(freg) | RN4(srcdst) | 0xf);
+ }
+
+ if (type & SLJIT_SIMD_FLOAT) {
+ if (elem_size == 3) {
+ if (type & SLJIT_SIMD_STORE)
+ return push_inst32(compiler, VORR | VD4(srcdst) | VN4(freg) | VM4(freg));
+ return push_inst32(compiler, VMOV_F32 | SLJIT_32 | VD4(freg) | VM4(srcdst));
+ }
+
+ if (type & SLJIT_SIMD_STORE) {
+ if (freg_ebit_map[freg] == 0) {
+ if (lane_index == 1)
+ freg = SLJIT_F64_SECOND(freg);
+
+ return push_inst32(compiler, VMOV_F32 | VD4(srcdst) | VM4(freg));
+ }
+
+ FAIL_IF(push_inst32(compiler, VMOV_s | (1 << 20) | ((sljit_ins)lane_index << 21) | VN4(freg) | RT4(TMP_REG1)));
+ return push_inst32(compiler, VMOV | VN4(srcdst) | RT4(TMP_REG1));
+ }
+
+ FAIL_IF(push_inst32(compiler, VMOV | (1 << 20) | VN4(srcdst) | RT4(TMP_REG1)));
+ return push_inst32(compiler, VMOV_s | ((sljit_ins)lane_index << 21) | VN4(freg) | RT4(TMP_REG1));
+ }
+
+ if (srcdst == SLJIT_IMM) {
+ if (elem_size < 2)
+ srcdstw &= ((sljit_sw)1 << (((sljit_sw)1 << elem_size) << 3)) - 1;
+
+ FAIL_IF(load_immediate(compiler, TMP_REG1, (sljit_uw)srcdstw));
+ srcdst = TMP_REG1;
+ }
+
+ if (elem_size == 0)
+ ins = 0x400000;
+ else if (elem_size == 1)
+ ins = 0x20;
+ else
+ ins = 0;
+
+ lane_index = lane_index << elem_size;
+ ins |= (sljit_ins)(((lane_index & 0x4) << 19) | ((lane_index & 0x3) << 5));
+
+ if (type & SLJIT_SIMD_STORE) {
+ ins |= (1 << 20);
+
+ if (elem_size < 2 && !(type & SLJIT_SIMD_LANE_SIGNED))
+ ins |= (1 << 23);
+ }
+
+ return push_inst32(compiler, VMOV_s | ins | VN4(freg) | RT4(srcdst));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_lane_replicate(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_s32 src_lane_index)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_lane_replicate(compiler, type, freg, src, src_lane_index));
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4) {
+ freg = simd_get_quad_reg_index(freg);
+ src = simd_get_quad_reg_index(src);
+
+ if (src_lane_index >= (0x8 >> elem_size)) {
+ src_lane_index -= (0x8 >> elem_size);
+ src += SLJIT_QUAD_OTHER_HALF(src);
+ }
+ }
+
+ if (elem_size == 3) {
+ if (freg != src)
+ FAIL_IF(push_inst32(compiler, VORR | VD4(freg) | VN4(src) | VM4(src)));
+
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+
+ if (freg != src)
+ return push_inst32(compiler, VORR | VD4(freg) | VN4(src) | VM4(src));
+ return SLJIT_SUCCESS;
+ }
+
+ ins = ((((sljit_ins)src_lane_index << 1) | 1) << (16 + elem_size));
+
+ if (reg_size == 4)
+ ins |= (sljit_ins)1 << 6;
+
+ return push_inst32(compiler, VDUP_s | ins | VD4(freg) | VM4(src));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_extend(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 src, sljit_sw srcw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_s32 elem2_size = SLJIT_SIMD_GET_ELEM2_SIZE(type);
+ sljit_s32 dst_reg;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_extend(compiler, type, freg, src, srcw));
+
+ ADJUST_LOCAL_OFFSET(src, srcw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size != 2 || elem2_size != 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4)
+ freg = simd_get_quad_reg_index(freg);
+
+ if (src & SLJIT_MEM) {
+ FAIL_IF(sljit_emit_simd_mem_offset(compiler, &src, srcw));
+ if (reg_size == 4 && elem2_size - elem_size == 1)
+ FAIL_IF(push_inst32(compiler, VLD1 | (0x7 << 8) | VD4(freg) | RN4(src) | 0xf));
+ else
+ FAIL_IF(push_inst32(compiler, VLD1_s | (sljit_ins)((reg_size - elem2_size + elem_size) << 10) | VD4(freg) | RN4(src) | 0xf));
+ src = freg;
+ } else if (reg_size == 4)
+ src = simd_get_quad_reg_index(src);
+
+ if (!(type & SLJIT_SIMD_FLOAT)) {
+ dst_reg = (reg_size == 4) ? freg : TMP_FREG2;
+
+ do {
+ FAIL_IF(push_inst32(compiler, VSHLL | ((type & SLJIT_SIMD_EXTEND_SIGNED) ? 0 : (1 << 28))
+ | ((sljit_ins)1 << (19 + elem_size)) | VD4(dst_reg) | VM4(src)));
+ src = dst_reg;
+ } while (++elem_size < elem2_size);
+
+ if (dst_reg == TMP_FREG2)
+ return push_inst32(compiler, VORR | VD4(freg) | VN4(TMP_FREG2) | VM4(TMP_FREG2));
+ return SLJIT_SUCCESS;
+ }
+
+ /* No SIMD variant, must use VFP instead. */
+ SLJIT_ASSERT(reg_size == 4);
+
+ if (freg == src) {
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | VD4(freg) | VM4(src) | 0x20));
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ return push_inst32(compiler, VCVT_F64_F32 | VD4(freg) | VM4(src));
+ }
+
+ FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | VD4(freg) | VM4(src)));
+ freg += SLJIT_QUAD_OTHER_HALF(freg);
+ return push_inst32(compiler, VCVT_F64_F32 | VD4(freg) | VM4(src) | 0x20);
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_sign(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 freg,
+ sljit_s32 dst, sljit_sw dstw)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins, imms;
+ sljit_s32 dst_r;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_sign(compiler, type, freg, dst, dstw));
+
+ ADJUST_LOCAL_OFFSET(dst, dstw);
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ switch (elem_size) {
+ case 0:
+ imms = 0x243219;
+ ins = VSHR | (1 << 28) | (0x9 << 16);
+ break;
+ case 1:
+ imms = (reg_size == 4) ? 0x243219 : 0x2231;
+ ins = VSHR | (1 << 28) | (0x11 << 16);
+ break;
+ case 2:
+ imms = (reg_size == 4) ? 0x2231 : 0x21;
+ ins = VSHR | (1 << 28) | (0x21 << 16);
+ break;
+ default:
+ imms = 0x21;
+ ins = VSHR | (1 << 28) | (0x1 << 16) | (1 << 7);
+ break;
+ }
+
+ if (reg_size == 4) {
+ freg = simd_get_quad_reg_index(freg);
+ ins |= (sljit_ins)1 << 6;
+ }
+
+ SLJIT_ASSERT((freg_map[TMP_FREG2] & 0x1) == 0);
+ FAIL_IF(push_inst32(compiler, ins | VD4(TMP_FREG2) | VM4(freg)));
+
+ if (reg_size == 4 && elem_size > 0)
+ FAIL_IF(push_inst32(compiler, VMOVN | ((sljit_ins)(elem_size - 1) << 18) | VD4(TMP_FREG2) | VM4(TMP_FREG2)));
+
+ ins = (reg_size == 4 && elem_size == 0) ? (1 << 6) : 0;
+
+ while (imms >= 0x100) {
+ FAIL_IF(push_inst32(compiler, VSRA | (1 << 28) | ins | ((imms & 0xff) << 16) | VD4(TMP_FREG2) | VM4(TMP_FREG2)));
+ imms >>= 8;
+ }
+
+ FAIL_IF(push_inst32(compiler, VSRA | (1 << 28) | ins | (1 << 7) | (imms << 16) | VD4(TMP_FREG2) | VM4(TMP_FREG2)));
+
+ dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
+ FAIL_IF(push_inst32(compiler, VMOV_s | (1 << 20) | (1 << 23) | (0x2 << 21) | RT4(dst_r) | VN4(TMP_FREG2)));
+
+ if (reg_size == 4 && elem_size == 0) {
+ SLJIT_ASSERT(freg_map[TMP_FREG2] + 1 == freg_map[TMP_FREG1]);
+ FAIL_IF(push_inst32(compiler, VMOV_s | (1 << 20) | (1 << 23) | (0x2 << 21) | RT4(TMP_REG2)| VN4(TMP_FREG1)));
+ FAIL_IF(push_inst32(compiler, ORR_W | RD4(dst_r) | RN4(dst_r) | RM4(TMP_REG2) | (0x2 << 12)));
+ }
+
+ if (dst_r == TMP_REG1)
+ return emit_op_mem(compiler, STORE | WORD_SIZE, TMP_REG1, dst, dstw, TMP_REG2);
+
+ return SLJIT_SUCCESS;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_simd_op2(struct sljit_compiler *compiler, sljit_s32 type,
+ sljit_s32 dst_freg, sljit_s32 src1_freg, sljit_s32 src2_freg)
+{
+ sljit_s32 reg_size = SLJIT_SIMD_GET_REG_SIZE(type);
+ sljit_s32 elem_size = SLJIT_SIMD_GET_ELEM_SIZE(type);
+ sljit_ins ins = 0;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_simd_op2(compiler, type, dst_freg, src1_freg, src2_freg));
+
+ if (reg_size != 3 && reg_size != 4)
+ return SLJIT_ERR_UNSUPPORTED;
+
+ if ((type & SLJIT_SIMD_FLOAT) && (elem_size < 2 || elem_size > 3))
+ return SLJIT_ERR_UNSUPPORTED;
+
+ switch (SLJIT_SIMD_GET_OPCODE(type)) {
+ case SLJIT_SIMD_OP2_AND:
+ ins = VAND;
+ break;
+ case SLJIT_SIMD_OP2_OR:
+ ins = VORR;
+ break;
+ case SLJIT_SIMD_OP2_XOR:
+ ins = VEOR;
+ break;
+ }
+
+ if (type & SLJIT_SIMD_TEST)
+ return SLJIT_SUCCESS;
+
+ if (reg_size == 4) {
+ dst_freg = simd_get_quad_reg_index(dst_freg);
+ src1_freg = simd_get_quad_reg_index(src1_freg);
+ src2_freg = simd_get_quad_reg_index(src2_freg);
+ ins |= (sljit_ins)1 << 6;
+ }
+
+ return push_inst32(compiler, ins | VD4(dst_freg) | VN4(src1_freg) | VM4(src2_freg));
}
#undef FPU_LOAD
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_load(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 dst_reg,
+ sljit_s32 mem_reg)
+{
+ sljit_ins ins;
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_load(compiler, op, dst_reg, mem_reg));
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV_U8:
+ ins = LDREXB;
+ break;
+ case SLJIT_MOV_U16:
+ ins = LDREXH;
+ break;
+ default:
+ ins = LDREX;
+ break;
+ }
+
+ return push_inst32(compiler, ins | RN4(mem_reg) | RT4(dst_reg));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_atomic_store(struct sljit_compiler *compiler, sljit_s32 op,
+ sljit_s32 src_reg,
+ sljit_s32 mem_reg,
+ sljit_s32 temp_reg)
+{
+ sljit_ins ins;
+
+ /* temp_reg == mem_reg is undefined so use another temp register */
+ SLJIT_UNUSED_ARG(temp_reg);
+
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_atomic_store(compiler, op, src_reg, mem_reg, temp_reg));
+
+ switch (GET_OPCODE(op)) {
+ case SLJIT_MOV_U8:
+ ins = STREXB | RM4(TMP_REG1);
+ break;
+ case SLJIT_MOV_U16:
+ ins = STREXH | RM4(TMP_REG1);
+ break;
+ default:
+ ins = STREX | RD4(TMP_REG1);
+ break;
+ }
+
+ FAIL_IF(push_inst32(compiler, ins | RN4(mem_reg) | RT4(src_reg)));
+ if (op & SLJIT_SET_ATOMIC_STORED)
+ return push_inst32(compiler, CMPI_W | RN4(TMP_REG1));
+
+ return SLJIT_SUCCESS;
+}
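Note: the exclusive-access pair maps directly to LDREX/STREX: the store writes 0 to TMP_REG1 on success and 1 on failure, and the trailing CMPI_W turns that into the Z flag consumed by SLJIT_ATOMIC_STORED / SLJIT_ATOMIC_NOT_STORED. A usage sketch of the usual retry loop, an atomic increment of the word at R1 (assuming a normal sljit compile sequence):

	struct sljit_label *retry = sljit_emit_label(compiler);
	/* R0 = *R1 (exclusive) */
	sljit_emit_atomic_load(compiler, SLJIT_MOV, SLJIT_R0, SLJIT_R1);
	/* R2 = R0 + 1 */
	sljit_emit_op2(compiler, SLJIT_ADD, SLJIT_R2, 0, SLJIT_R0, 0, SLJIT_IMM, 1);
	/* try *R1 = R2, setting the "stored" flag */
	sljit_emit_atomic_store(compiler, SLJIT_MOV | SLJIT_SET_ATOMIC_STORED,
		SLJIT_R2, SLJIT_R1, SLJIT_R3);
	sljit_set_label(sljit_emit_jump(compiler, SLJIT_ATOMIC_NOT_STORED), retry);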
+
SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
{
struct sljit_const *const_;